Connection error with Docker Compose: LoopBack 4 + MongoDB + Express

I run the project by cleaning volumes and containers, then docker compose up --build.
I have tried creating different users for Mongo, and different settings for the Dockerfile and for Docker Compose.
I assume the error is somewhere between the LoopBack and MongoDB containers.
For watching changes I use tsc-watch; I don't know whether it causes any bugs here.
Configuration
package.json
{
"name": "lympha-backend",
"version": "0.0.1",
"description": "lympha backend",
"keywords": [
"loopback-application",
"loopback"
],
"main": "dist/index.js",
"types": "dist/index.d.ts",
"engines": {
"node": "14 || 16 || 18 || 19"
},
"scripts": {
"dev": "tsc-watch --target es2017 --outDir ./dist --onSuccess \"node .\"",
"build": "lb-tsc",
"build:watch": "lb-tsc --watch",
"watch": "lb-tsc --watch",
"lint": "yarn run eslint && yarn run prettier:check",
"lint:fix": "yarn run eslint:fix && yarn run prettier:fix",
"prettier:cli": "lb-prettier \"**/*.ts\" \"**/*.js\"",
"prettier:check": "yarn run prettier:cli -l",
"prettier:fix": "yarn run prettier:cli --write",
"eslint": "lb-eslint --report-unused-disable-directives .",
"eslint:fix": "yarn run eslint --fix",
"pretest": "yarn run rebuild",
"test": "lb-mocha --allow-console-logs \"dist/__tests__\"",
"posttest": "yarn run lint",
"test:dev": "lb-mocha --allow-console-logs dist/__tests__/**/*.js && yarn run posttest",
"docker:build": "docker build -t lympha-backend .",
"docker:run": "docker run -p 3000:3000 -d lympha-backend",
"premigrate": "yarn run build",
"migrate": "node ./dist/migrate",
"preopenapi-spec": "yarn run build",
"openapi-spec": "node ./dist/openapi-spec",
"prestart": "yarn run rebuild",
"start": "node -r source-map-support/register .",
"clean": "lb-clean dist *.tsbuildinfo .eslintcache",
"rebuild": "yarn run clean && yarn run build"
},
"repository": {
"type": "git",
"url": ""
},
"license": "",
"files": [
"README.md",
"dist",
"src",
"!*/__tests__"
],
"dependencies": {
"#loopback/boot": "^5.0.7",
"#loopback/core": "^4.0.7",
"#loopback/repository": "^5.1.2",
"#loopback/rest": "^12.0.7",
"#loopback/rest-crud": "^0.15.6",
"#loopback/rest-explorer": "^5.0.7",
"#loopback/service-proxy": "^5.0.7",
"loopback-connector-mongodb": "^5.2.3",
"tsc-watch": "^6.0.0",
"tslib": "^2.0.0"
},
"devDependencies": {
"#loopback/build": "^9.0.7",
"#loopback/eslint-config": "^13.0.7",
"#loopback/testlab": "^5.0.7",
"#types/node": "^14.18.36",
"eslint": "^8.30.0",
"source-map-support": "^0.5.21",
"typescript": "~4.9.4"
}
}
Docker compose
version: '3.9'
services:
mongodb:
image: mongo
container_name: mongodb
ports:
- "27017:27017"
environment:
- MONGO_INITDB_ROOT_USERNAME=root
- MONGO_INITDB_ROOT_PASSWORD=password
- MONGO_INITDB_DATABASE=admin
restart: always
volumes:
- ./mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
- mongodb:/data/db
backend:
container_name: backend
build:
context: .
dockerfile: ./lympha-backend/Dockerfile
command: ["yarn", "dev"]
ports:
- 4000:3000
environment:
NAME: TEST_DEVELOPMENT
PORT: 3000
DB_NAME: lympha_db
DB_USER: root
DB_PASS: password
restart: always
volumes:
- ./lympha-backend:/home/node/app
depends_on:
- mongodb
links:
- mongodb
volumes:
backend:
mongodb:
Dockerfile
# Check out https://hub.docker.com/_/node to select a new base image
FROM node:16-slim
# Set to a non-root built-in user `node`
USER node
# Create app directory (with user `node`)
RUN mkdir -p /home/node/app
RUN mkdir -p /home/node/app/dist
WORKDIR /home/node/app
RUN pwd
COPY --chown=node package*.json ./
# RUN npm install
RUN yarn
# Bundle app source code
COPY --chown=node . .
# Bind to all network interfaces so that it can be mapped to the host OS
ENV HOST=0.0.0.0 PORT=3000
EXPOSE ${PORT}
mongo-init.js
db.createUser({
user: 'admin',
pwd: 'password',
roles: [
{ role: 'root', db: 'admin' },
]
});
db = db.getSiblingDB('lympha_db');
db.createCollection("lympha_db"); //MongoDB creates the database when you first store data in that database
db.createUser(
{
user: "lympha",
pwd: "lympha",
roles: [
{
role: "readWrite",
db: "lympha_db"
}
]
}
);
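As a side note, a quick way to sanity-check the credentials created above is a tiny script against the mongodb driver (a sketch, not from the original post; run it inside the backend container, where the hostname mongodb resolves to the Mongo service):
// verify-mongo.js: connectivity check for the user created by mongo-init.js above
const {MongoClient} = require('mongodb');

async function main() {
  // "mongodb" is the Compose service name; use "localhost" when running on the host.
  const client = new MongoClient('mongodb://lympha:lympha@mongodb:27017/lympha_db');
  await client.connect();
  const result = await client.db('lympha_db').command({ping: 1});
  console.log('ping ok:', result.ok === 1);
  await client.close();
}

main().catch(err => {
  console.error('connection failed:', err.message);
  process.exit(1);
});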
Mongo terminal
(screenshot not included)
Logs
MongoDB logs
(screenshot not included)
Backend logs
{
name: 'mongodb',
connector: 'mongodb',
host: 'localhost',
port: 27017,
user: 'admin',
password: 'password',
database: 'admin'
}
Server is running at http://127.0.0.1:3000
Try http://127.0.0.1:3000/ping
Connection fails: MongoServerSelectionError: connect ECONNREFUSED 127.0.0.1:27017
It will be retried for the next request.
/home/node/app/node_modules/mongodb/lib/utils.js:698
throw error;
^
MongoServerSelectionError: connect ECONNREFUSED 127.0.0.1:27017
at Timeout._onTimeout (/home/node/app/node_modules/mongodb/lib/core/sdam/topology.js:438:30)
at listOnTimeout (node:internal/timers:559:17)
at processTimers (node:internal/timers:502:7)
Emitted 'error' event on MongoDataSource instance at:
at MongoDataSource.postInit (/home/node/app/node_modules/loopback-datasource-juggler/lib/datasource.js:502:16)
at onError (/home/node/app/node_modules/loopback-connector-mongodb/lib/mongodb.js:325:21)
at /home/node/app/node_modules/loopback-connector-mongodb/lib/mongodb.js:333:9
at /home/node/app/node_modules/mongodb/lib/utils.js:695:9
at /home/node/app/node_modules/mongodb/lib/mongo_client.js:285:23
at connectCallback (/home/node/app/node_modules/mongodb/lib/operations/connect.js:367:5)
at /home/node/app/node_modules/mongodb/lib/operations/connect.js:554:14
at connectHandler (/home/node/app/node_modules/mongodb/lib/core/sdam/topology.js:286:11)
at Object.callback (/home/node/app/node_modules/mongodb/lib/core/sdam/topology.js:672:9)
at Timeout._onTimeout (/home/node/app/node_modules/mongodb/lib/core/sdam/topology.js:443:25)
at listOnTimeout (node:internal/timers:559:17)
at processTimers (node:internal/timers:502:7) {
reason: TopologyDescription {
type: 'Single',
setName: null,
maxSetVersion: null,
maxElectionId: null,
servers: Map(1) {
'localhost:27017' => ServerDescription {
address: 'localhost:27017',
error: Error: connect ECONNREFUSED 127.0.0.1:27017
at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1278:16) {
name: 'MongoNetworkError'
},
roundTripTime: -1,
lastUpdateTime: 7989795,
lastWriteDate: null,
opTime: null,
type: 'Unknown',
topologyVersion: undefined,
minWireVersion: 0,
maxWireVersion: 0,
hosts: [],
passives: [],
arbiters: [],
tags: []
}
},
stale: false,
compatible: true,
compatibilityError: null,
logicalSessionTimeoutMinutes: null,
heartbeatFrequencyMS: 10000,
localThresholdMS: 15,
commonWireVersion: null
}
}
I can't connect LB4 and Mongo.
I can start from scratch to figure out what is going on. Ask for anything you need and I will provide whatever is necessary. Please ping me.

When you start services with Compose, it sets up a single network for them (https://docs.docker.com/compose/networking/).
In short, each container can reach the others by service name. In your case that is mongodb, not localhost.
Try this change:
- host: 'localhost',
+ host: 'mongodb',
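
For reference, here is a minimal sketch of what the datasource could look like when wired to the environment variables from the compose file above. It follows the standard lb4-generated datasource shape; DB_HOST is not in the original compose file and is an assumed addition, and authSource: 'admin' is included because the root user is created in the admin database.
// src/datasources/mongodb.datasource.ts (sketch; adjust paths and names to your project)
import {inject, lifeCycleObserver, LifeCycleObserver} from '@loopback/core';
import {juggler} from '@loopback/repository';

// The host must be the Compose service name ("mongodb"), not "localhost":
// inside the backend container, localhost is the container itself.
const config = {
  name: 'mongodb',
  connector: 'mongodb',
  host: process.env.DB_HOST ?? 'mongodb', // DB_HOST is an assumed addition
  port: 27017,
  user: process.env.DB_USER, // "root" in docker-compose.yml
  password: process.env.DB_PASS, // "password" in docker-compose.yml
  database: process.env.DB_NAME ?? 'lympha_db',
  authSource: 'admin', // the root user lives in the admin database
};

@lifeCycleObserver('datasource')
export class MongodbDataSource
  extends juggler.DataSource
  implements LifeCycleObserver
{
  static dataSourceName = 'mongodb';
  static readonly defaultConfig = config;

  constructor(
    @inject('datasources.config.mongodb', {optional: true})
    dsConfig: object = config,
  ) {
    super(dsConfig);
  }
}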

Related

How to login to AWS CodeArtifact before npm installs

How can I trigger the login before the install script in node v18.12.1/npm 8.19.2?
I expect the npm preinstall script to run before the install script, based on the documentation. When running node v14.21.1/npm 6.14.17 I was able to follow the instructions in this tutorial to trigger a CodeArtifact login with the preinstall script. All packages were installed from my CodeArtifact repository on running npm install, as expected.
After updating to node v18.12.1/npm 8.19.2, npm install fails with the error:
Unable to authenticate, your authentication token seems to be invalid
What I have tested (each in a fresh environment with no npm cache):
npm run preinstall successfully logs in
∴ AWS credentials are configured and the preinstall script works when executed
npm install fails authentication
npm run preinstall; npm install successfully logs in and installs from CodeArtifact
∴ the preinstall script is not being run. Filtering the debug logs for co:login returns nothing, suggesting the co:login step never ran:
$ cat /root/.npm/_logs/2022-12-06T07_12_47_353Z-debug-0.log | grep co:login
$ echo $?
1
After reverting to node v14.21.1/npm 6.14.17, npm install behaves as expected, logging in and installing packages from CodeArtifact.
I have recreated the problem with a minimal package.json which can be tested in a docker container:
docker run --rm -it -v ~/.aws:/root/.aws:ro --name app node:18.12.1 /bin/bash
wget --quiet -O "awscliv2.zip" "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" && unzip -q awscliv2.zip
./aws/install
rm awscliv2.zip && rm -r ./aws
Copy the files in from a separate terminal:
docker cp package.json app:/package.json
docker cp package-lock.json app:/package-lock.json
Then install with npm install in the Docker container.
package.json template (substitute your own CodeArtifact repository, domain, and owner):
{
"name": "app",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"preinstall": "npm run co:login",
"co:login": "aws codeartifact login --tool npm --repository my-repository --domain my-domain --domain-owner 123456789 --region ap-southeast-2"
},
"author": "",
"license": "ISC",
"dependencies": {
"dayjs": "^1.11.6"
}
}
package-lock.json template (substitute your own CodeArtifact URL):
{
"name": "app",
"version": "1.0.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "app",
"version": "1.0.0",
"hasInstallScript": true,
"license": "ISC",
"dependencies": {
"dayjs": "^1.11.6"
}
},
"node_modules/dayjs": {
"version": "1.11.6",
"resolved": "https://mydomain-123456789.d.codeartifact.ap-southeast-2.amazonaws.com:443/npm/my-repository/dayjs/-/dayjs-1.11.6.tgz",
"integrity": "sha512-zZbY5giJAinCG+7AGaw0wIhNZ6J8AhWuSXKvuc1KAyMiRsvGQWqh4L+MomvhdAYjN+lqvVCMq1I41e3YHvXkyQ=="
}
},
"dependencies": {
"dayjs": {
"version": "1.11.6",
"resolved": "https://mydomain-123456789.d.codeartifact.ap-southeast-2.amazonaws.com:443/npm/my-repository/dayjs/-/dayjs-1.11.6.tgz",
"integrity": "sha512-zZbY5giJAinCG+7AGaw0wIhNZ6J8AhWuSXKvuc1KAyMiRsvGQWqh4L+MomvhdAYjN+lqvVCMq1I41e3YHvXkyQ=="
}
}
}
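Note that the test results above already point at a workaround: run the login explicitly before installing. A hypothetical script entry (the setup name is an illustration, not part of the original files) could chain the two steps:
"scripts": {
  "co:login": "aws codeartifact login --tool npm --repository my-repository --domain my-domain --domain-owner 123456789 --region ap-southeast-2",
  "setup": "npm run co:login && npm install"
}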

Nuxt 3 + Vite & HMR : infinite reload & failed

On a fresh install of Nuxt3, using Docker, I have this error on the console and an infinite reload of the page :
client.ts:28 WebSocket connection to 'wss://shop.store.local/_nuxt/' failed: (anonymous) @ client:188
client.ts:224 [vite] server connection lost. polling for restart...
Here is the configuration of my vite server (via nuxt.config.js):
vite: {
server: {
hmr: {
host: 'shop.store.local',
port: 443,
}
}
}
The docker-compose describes the Traefik labels:
vuejs:
labels:
- "traefik.http.routers.front_store.rule=Host(`shop.store.local`)"
- "traefik.http.routers.front_store.tls=true"
- "traefik.http.services.front_store.loadbalancer.server.port=3000"
I have also tried this in my package.json file:
"scripts": {
"dev": "nuxi dev --host=0.0.0.0",
"build": "nuxi build",
"start": "node .output/server/index.mjs"
},
Any ideas? I have looked around the internet; other people have this problem, but there is no solution...
Expose the ports for the Nuxt container:
ports:
  - "3000:3000"
  - "24678:24678"
Also edit your nuxt.config:
vite: {
server: {
host: "0.0.0.0",
hmr: {
},
},
},
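
If the site is served over TLS through Traefik as above, it can also help to tell the HMR client which port and protocol the browser should use. A sketch, assuming Traefik terminates TLS on 443 and Nuxt's HMR websocket listens on its default 24678 (the protocol/clientPort values are assumptions, not from the original answer):
// nuxt.config.ts (sketch; hostname and ports assume the Traefik setup above)
export default defineNuxtConfig({
  vite: {
    server: {
      hmr: {
        protocol: 'wss', // the browser connects through Traefik's TLS endpoint
        clientPort: 443, // the port the browser dials, not the container port
        port: 24678, // HMR websocket port inside the container
      },
    },
  },
});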

Next.js - yarn dev: error Command failed with signal "SIGSEGV"

I am having this problem whenever I run yarn dev or npm run dev:
yarn run v1.22.10
warning ../../../../package.json: No license field
$ next dev
ready - started server on 0.0.0.0:3000, url: http://localhost:3000
info - Using webpack 5. Reason: no next.config.js https://nextjs.org/docs/messages/webpack5
event - compiled successfully
event - build page: /
wait - compiling...
error Command failed with signal "SIGSEGV".
info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command.
package.json:
{
"name": "nextjs",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start"
},
"dependencies": {
"next": "^10.2.3",
"react": "17.x",
"react-dom": "17.x",
"webpack": "^5.39.1"
},
"devDependencies": {
"webpack-cli": "^4.7.2"
}
}
This appears to be a problem with the Mac M1, as @Fernando Gomes commented.
To get it working in my environment, I am using Docker with the following parameters:
docker buildx build --platform linux/amd64 .
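If you run the app through Compose instead, the same idea can be expressed with the platform key (a sketch; the web service name is an assumption):
services:
  web:
    build: .
    platform: linux/amd64 # force amd64 emulation on Apple Silicon
    ports:
      - "3000:3000"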

Docker-compose node express container doesn't show error logs and restart continuously

I am trying to get nginx and a Node Express app working in containers using Docker Compose.
The problem is that the express container doesn't show any error log explaining why it died.
This is my docker-compose.yml
version: '3'
services:
proxy:
image: nginx:latest
container_name: proxy
ports:
- "80:80"
volumes:
- ./proxy/nginx.conf:/etc/nginx/nginx.conf
restart: "unless-stopped"
express:
build:
context: ./server
container_name: express
expose:
- "3000"
volumes:
- ./source:/source
- /source/node_modules
restart: "unless-stopped"
This is my directory structure (screenshot not included).
Into the source directory I moved all the files and directories generated by express-generator.
This is my Dockerfile.
FROM node:12
COPY package*.json /source/
WORKDIR /source
RUN npm install
CMD [ "node", "app.js" ]
This is my package.json
{
"name": "docker_web_app",
"version": "1.0.0",
"description": "",
"main": "server.js",
"scripts": {
"start": "node app.js",
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "sjk5766",
"license": "ISC",
"dependencies": {
"cookie-parser": "~1.4.4",
"debug": "~2.6.9",
"express": "~4.16.1",
"http-errors": "~1.6.3",
"jade": "~1.11.0",
"morgan": "~1.9.1"
}
}
When I run docker ps after docker-compose up -d, the express container keeps restarting (screenshot not included).
When I run docker logs express, there is nothing to see.
I really want to know what the problem is.
Considering your Express application runs fine without Docker, you can change your Dockerfile as below:
FROM node:12
WORKDIR /source
COPY . .
RUN npm install
EXPOSE 3000
CMD [ "node", "app.js" ]
The COPY command copies the code from your local ./server build context into the image's /source directory.
Then try a docker-compose file like this:
version: '3'
services:
proxy:
image: nginx:latest
container_name: proxy
ports:
- "80:80"
volumes:
- ./proxy/nginx.conf:/etc/nginx/nginx.conf
networks:
- test_bridge
restart: "unless-stopped"
express:
build: ./server
container_name: express
ports:
- "3000:3000"
networks:
- test_bridge
networks:
test_bridge:
driver: bridge
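If the container still dies silently, it also helps to make the entrypoint itself log startup and failures. A minimal app.js along these lines (a sketch matching the CMD above, not the asker's actual file) guarantees something shows up in docker logs express:
// server/app.js: minimal entrypoint matching CMD ["node", "app.js"]
const express = require('express');

const app = express();
const port = process.env.PORT || 3000;

app.get('/', (req, res) => res.send('ok'));

app
  .listen(port, '0.0.0.0', () => console.log(`express listening on ${port}`))
  .on('error', err => {
    // Surface startup errors (e.g. port in use) instead of exiting silently.
    console.error('startup failed:', err);
    process.exit(1);
  });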

Webdriver instances not created for custom protractor.conf file

I want to integrate my E2E suite with Travis, so I followed this article. As mentioned in the article, I created a custom protractor.ci.conf.js file for the Travis build and placed it inside my e2e folder (path: e2e/protractor.ci.conf.js).
The only difference between my custom e2e/protractor.ci.conf.js and the Angular-generated protractor.conf.js is the value of the args property, shown below.
e2e/protractor.ci.conf.js
chromeOptions: {
args: [
'--headless',
'window-size=1920,1080'
]
}
protractor.conf.js
const SpecReporter = require('jasmine-spec-reporter').SpecReporter;
exports.config = {
allScriptsTimeout: 11000,
specs: [
'./e2e/**/*.e2e-spec.ts'
],
capabilities: {
shardTestFiles: true,
maxInstances: 2,
'browserName': 'chrome',
chromeOptions: {
args: ['--start-maximized']
}
},
directConnect: true,
baseUrl: 'localhost:4000/',
framework: 'jasmine',
jasmineNodeOpts: {
showColors: true,
defaultTimeoutInterval: 300000,
print: function () {
}
},
useAllAngular2AppRoots: true,
onPrepare: function () {
jasmine.getEnv().addReporter(new SpecReporter());
require('ts-node').register({
project: 'e2e/tsconfig.json'
});
}
};
In my package.json file there are two scripts: one for running the tests locally and one for Travis.
package.json (at the same level as protractor.conf.js)
"scripts": {
...
"test": "ng test --watch=false",
"pree2e": "webdriver-manager update",
"e2e": "concurrently --kill-others \"ng e2e --port=4000\" \"npm run _server:run\"",
"e2e:ci": "concurrently --kill-others \"ng e2e --port=4000 --protractor-config=e2e/protractor.ci.conf.js\" \"npm run _server:run\"",
"_server:run": "tsc -p ./server && concurrently \"tsc -w -p ./server\" \"nodemon dist/server/index.js\" ",
...
},
.travis.yml
branches:
only:
- staging
- prod
- functional-testing
script:
...
- if [[ $TRAVIS_COMMIT_MESSAGE == *"[skip e2e]"* ]]; then echo "skipping E2E test"; else npm run e2e:ci; fi
...
before_deploy:
- sed -i '/dist/d' .gitignore
- git add . && git commit -m "latest build"
- cd $TRAVIS_BUILD_DIR/dist
PROBLEM
When simply running npm run e2e, every test works fine. But when I use the npm run e2e:ci command, the script hangs and no WebDriver instances start:
I/launcher - Running 0 instances of WebDriver
appears instead of 1 or 2 instances.
That's because you created a new config file and placed it in the /e2e folder instead of the default root folder.
The paths to the test files are resolved relative to the config file, so they must be updated as well:
'./e2e/**/*.e2e-spec.ts' becomes './**/*.e2e-spec.ts'
Since the runner currently cannot find any files matching the specs pattern, it doesn't launch any WebDriver instances.
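Put together, the CI config could look like this (a sketch based on the protractor.conf.js above, with the adjusted specs path and the headless flags from the question):
// e2e/protractor.ci.conf.js (sketch; mirrors protractor.conf.js with CI changes)
const SpecReporter = require('jasmine-spec-reporter').SpecReporter;

exports.config = {
  allScriptsTimeout: 11000,
  // Spec paths are resolved relative to this file, which lives inside e2e/.
  specs: ['./**/*.e2e-spec.ts'],
  capabilities: {
    shardTestFiles: true,
    maxInstances: 2,
    browserName: 'chrome',
    chromeOptions: {
      args: ['--headless', 'window-size=1920,1080'],
    },
  },
  directConnect: true,
  baseUrl: 'localhost:4000/',
  framework: 'jasmine',
  jasmineNodeOpts: {
    showColors: true,
    defaultTimeoutInterval: 300000,
    print: function () {},
  },
  useAllAngular2AppRoots: true,
  onPrepare: function () {
    jasmine.getEnv().addReporter(new SpecReporter());
    require('ts-node').register({
      // ts-node resolves this from the working directory, so the path can stay
      // the same as in the root config; adjust if your CI runs from elsewhere.
      project: 'e2e/tsconfig.json',
    });
  },
};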