I am trying to migrate a Socket.IO service from GCP (App Engine) to a Kubernetes cluster.
Everything works fine on the GCP side (we have a single server instance without replicas).
The migration to k8s is going very well, except that when the client socket connects to the server, it does not receive some of the data:
In 'polling' transport: of course, since there are two pods, this no longer works properly and the client socket keeps disconnecting/reconnecting in a loop.
In 'websocket' transport: the connection is established correctly and the client can receive data from the server in 'broadcast to all clients' mode => socket.emit('getDeviceList', os.hostname()), but as soon as the server tries to send data only to the relevant client, io.of(namespace).to(socket.id).emit('getDeviceList', JSON.stringify(obj)), the client receives nothing...
Moreover, I modified my service to run only one pod as a test: polling mode then works correctly, but I end up in the same situation as with websocket mode => I cannot send data to a specific client...
Of course, the same code on the App Engine side works correctly and the client receives everything.
I'm working with:
"socket.io": "^3.1.0",
"socket.io-redis": "^5.2.0",
"vue": "^2.5.18",
"vue-socket.io": "3.0.7",
My server-side configuration:
const redis = require('socket.io-redis');
const io = require('socket.io')(server, {
  pingTimeout: 5000,
  pingInterval: 2000,
  // 'transports' is a top-level server option; nested inside 'cors' it has no effect
  transports: ['websocket', 'polling'],
  cors: {
    origin: true,
    methods: ["GET", "POST"],
    credentials: true
  },
  allowEIO3: true
});
io.adapter(redis({ host: redis_host, port: redis_port }));
My front-side configuration:
Vue.use(new VueSocketIO({
  debug: true,
  connection: 'path_to_the_socket_io/namespace',
  options: {
    query: `id=..._timestamp`,
    transports: ['polling']
  }
}));
My ingress annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/ingress.global-static-ip-name: ip-loadbalancer
meta.helm.sh/release-name: xxx
meta.helm.sh/release-namespace: xxx-release
nginx.ingress.kubernetes.io/affinity: cookie
nginx.ingress.kubernetes.io/affinity-mode: persistent
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "10800"
nginx.ingress.kubernetes.io/proxy-read-timeout: "10800"
nginx.ingress.kubernetes.io/proxy-send-timeout: "10800"
nginx.org/websocket-services: app-sockets-cluster-ip-service
My question is: why do I receive messages broadcast to all users, but not messages sent specifically to my socket?
Can someone help me? :)
Thanks a lot!
I found the solution during the day and I am sharing it.
In fact, the problem is not due to the Kubernetes cluster but to the socket.io and socket.io-redis adapter versions.
I was using socket.io 3.x.x together with socket.io-redis 5.x.x.
With this version of socket.io, you actually need socket.io-redis 6.x.x :)
You can find the compatible versions of socket.io and the Redis adapter here:
https://github.com/socketio/socket.io-redis-adapter#compatibility-table
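For reference, a minimal sketch of the fixed pairing (assuming the same redis_host / redis_port variables as in the server configuration above; with socket.io-redis 6.x the adapter is wired up the same way, so only the dependency version changes):

// package.json (excerpt)
//   "socket.io": "^3.1.0",
//   "socket.io-redis": "^6.0.1"

const redisAdapter = require('socket.io-redis');

// With a compatible adapter version, rooms (including the implicit
// per-socket room used by io.to(socket.id)) are propagated across pods
// through Redis, so targeted emits reach clients connected to other pods.
io.adapter(redisAdapter({ host: redis_host, port: redis_port }));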
Thanks a lot.
I'm using the Mercure hub 0.13. Everything works fine on my development machine, but on my test server the hub keeps trying to bind to port 80, resulting in an error, since nginx is already running on port 80.
run: loading initial config: loading new config: http app module: start: tcp: listening on :80: listen tcp :80: bind: address already in use
I'm starting the hub with the following command:
MERCURE_PUBLISHER_JWT_KEY=$(cat publisher.key.pub) \
MERCURE_PUBLISHER_JWT_ALG=RS256 \
MERCURE_SUBSCRIBER_JWT_KEY=$(cat publisher.key.pub) \
MERCURE_SUBSCRIBER_JWT_ALG=RS256 \
./mercure run -config Caddyfile.dev
Caddyfile.dev is as follows:
# Learn how to configure the Mercure.rocks Hub on https://mercure.rocks/docs/hub/config
{
    {$GLOBAL_OPTIONS}
}

{$SERVER_NAME:localhost:3000}

log

route {
    redir / /.well-known/mercure/ui/

    encode zstd gzip

    mercure {
        # Transport to use (default to Bolt)
        transport_url {$MERCURE_TRANSPORT_URL:bolt://mercure.db}

        # Publisher JWT key
        publisher_jwt {env.MERCURE_PUBLISHER_JWT_KEY} {env.MERCURE_PUBLISHER_JWT_ALG}

        # Subscriber JWT key
        subscriber_jwt {env.MERCURE_SUBSCRIBER_JWT_KEY} {env.MERCURE_SUBSCRIBER_JWT_ALG}

        # Permissive configuration for the development environment
        cors_origins *
        publish_origins *
        demo
        anonymous
        subscriptions

        # Extra directives
        {$MERCURE_EXTRA_DIRECTIVES}
    }

    respond /healthz 200
    respond "Not Found" 404
}
When I provide the SERVER_NAME as an environment variable without a domain, SERVER_NAME=:3000, the hub actually starts on port 3000, but runs in HTTP mode, which only allows anonymous subscriptions and is not what I need.
Server:
Operating System: CentOS Stream 8
Kernel: Linux 4.18.0-383.el8.x86_64
Architecture: x86-64
Full output when trying to start the Mercure hub:
2022/05/10 04:50:29.605 INFO using provided configuration {"config_file": "Caddyfile.dev", "config_adapter": ""}
2022/05/10 04:50:29.606 WARN input is not formatted with 'caddy fmt' {"adapter": "caddyfile", "file": "Caddyfile.dev", "line": 3}
2022/05/10 04:50:29.609 INFO admin admin endpoint started {"address": "tcp/localhost:2019", "enforce_origin": false, "origins": ["localhost:2019", "[::1]:2019", "127.0.0.1:2019"]}
2022/05/10 04:50:29.610 INFO http enabling automatic HTTP->HTTPS redirects {"server_name": "srv0"}
2022/05/10 04:50:29.610 INFO tls.cache.maintenance started background certificate maintenance {"cache": "0xc0003d6150"}
2022/05/10 04:50:29.627 INFO tls cleaning storage unit {"description": "FileStorage:/root/.local/share/caddy"}
2022/05/10 04:50:29.628 INFO tls finished cleaning storage units
2022/05/10 04:50:29.642 INFO pki.ca.local root certificate is already trusted by system {"path": "storage:pki/authorities/local/root.crt"}
2022/05/10 04:50:29.643 INFO tls.cache.maintenance stopped background certificate maintenance {"cache": "0xc0003d6150"}
run: loading initial config: loading new config: http app module: start: tcp: listening on :80: listen tcp :80: bind: address already in use
I'm a bit late, but I hope this will help someone.
As mentioned here, you can specify the http_port manually in your Caddy configuration file.
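For example, in the global options block of the Caddyfile above (a sketch; 3001 is just an arbitrary free port):

{
    {$GLOBAL_OPTIONS}
    # Move Caddy's plain-HTTP listener (used for the HTTP->HTTPS redirect)
    # off the default :80, which is already occupied by nginx.
    http_port 3001
}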
Problem
When I upgraded from the Heroku Redis Hobby plan to the Heroku Redis Premium 0 plan, the app kept crashing with an H10 error.
Cause
Redis 6 requires TLS to connect. However, Heroku manages requests from the router level to the application level using self-signed certs: Heroku terminates SSL at the router level, and requests are forwarded from there to the application via HTTP, with everything sitting behind Heroku's firewall and other security measures.
Links that helped track down the cause:
https://ogirginc.github.io/en/heroku-redis-ssl-error
How to enable TLS for Redis 6 on Sidekiq?
Solution
Customize the options passed into Redis so that tls.rejectUnauthorized is set to false.
const Queue = require('bull');
const redisUrlParse = require('redis-url-parse');

const REDIS_URL = process.env.REDIS_URL || 'redis://127.0.0.1:6379';

const redisUrlParsed = redisUrlParse(REDIS_URL);
const { host, port, password } = redisUrlParsed;

// Heroku Redis 6 exposes a TLS URL with the rediss:// scheme. Because SSL
// is terminated at Heroku's router with self-signed certs, certificate
// verification must be disabled; plain redis:// URLs are passed through.
const bullOptions = REDIS_URL.includes('rediss://')
  ? {
      redis: {
        port: Number(port),
        host,
        password,
        tls: {
          rejectUnauthorized: false,
        },
      },
    }
  : REDIS_URL;

const workQueue = new Queue('work', bullOptions);
Adding (on top of yeoman's great answer):
If you hit SSL verification errors on Heroku when using django-rq and the latest Redis add-on, know that the RQ_QUEUES definition in Django's settings.py supports an SSL_CERT_REQS key, and you can explicitly set it to None to resolve these errors (see the sketch below, inspired by https://paltman.com/how-to-turn-off-ssl-verify-django-rq-heroku-redis/).
Note that this requires bumping django-rq to a version >= 2.5.1.
This might be relevant for anyone who uses Redis only for queuing (e.g. with RQ) and not for caching.
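A minimal sketch of that settings.py entry (the queue name, URL variable, and timeout are illustrative; SSL_CERT_REQS: None is the relevant part):

# settings.py (sketch, requires django-rq >= 2.5.1)
import os

RQ_QUEUES = {
    'default': {
        'URL': os.environ.get('REDIS_URL', 'redis://localhost:6379/0'),
        'DEFAULT_TIMEOUT': 360,
        # Accept Heroku's self-signed certificate chain
        'SSL_CERT_REQS': None,
    },
}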
I prepared an Ubuntu server as described in the docs. I created an SSL cert for my domain and opened the required ports. I installed Red5 Pro into /usr/local/red5pro/ and the server runs fine. When I go to http://example.com:5080/ I can see the Red5 Pro home page, and that is OK. But when I click on broadcast I get the message: No suitable Publisher found. WebRTC & Flash not supported. OK, maybe that is because it is http, not https. So I decided to create a test index page in /var/www/test/index.html with a basic configuration like:
var config = {
  protocol: 'wss',
  host: 'example.com',
  port: 443,
  app: 'live',
  streamName: 'abccaccaa',
  rtcConfiguration: {
    iceServers: [{ urls: 'stun:stun2.l.google.com:19302' }],
    iceCandidatePoolSize: 2,
    bundlePolicy: 'max-bundle'
  } // See https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection#RTCConfiguration_dictionary
};
And now when I try to broadcast I get: WebSocket connection to 'wss://example.com/live/?id=abccaccaa' failed: Error during WebSocket handshake: Unexpected response code: 404
It looks like there is nothing answering at example.com/live, and I haven't been able to figure out what is wrong :( for 2 days. Maybe someone could give me some advice? Or suggest an alternative application to Red5 Pro?
What I am trying to do is:
For client-to-broker communication: use OAUTHBEARER authentication.
For broker-to-broker communication: use PLAIN authentication.
I have the following JAAS configuration:
KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="inter"
    password="inter-secret"
    user_inter="inter-secret"
    user_admin="YvNzcbmqhA0DfxjP";

    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;
};

Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="zookeeper"
    password="zookeeper-secret";
};
And I have the following configs in server.properties:
sasl.enabled.mechanisms=PLAIN,OAUTHBEARER
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.server.callback.handler.class=br.com.jairsjunior.security.oauthbearer.OauthAuthenticateValidatorCallbackHandler
But when I start the Kafka service, I see an error like the one below:
Caused by: java.lang.IllegalArgumentException: Must supply exactly 1 non-null JAAS mechanism configuration (size was 2)
at org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredValidatorCallbackHandler.configure(OAuthBearerUnsecuredValidatorCallbackHandler.java:114)
at org.apache.kafka.common.network.SaslChannelBuilder.configure(SaslChannelBuilder.java:122)
... 17 more
which indicates that Kafka does not allow multiple JAAS mechanism configurations within a single context.
So how can I specify multiple JAAS configs and set up the authentication mechanisms like below?
Client to Broker ----> OAUTHBEARER
Broker to Broker ----> PLAIN
Thanks!
I am currently also working on using PLAIN and OAUTHBEARER simultaneously. I have not solved that completely yet, but I did solve your specific question, in the following way.
This is my JAAS configuration:
internal.KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="admin-secret"
    user_admin="admin-secret"
    user_test="test";
};

external.KafkaServer {
    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;
};

Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="username"
    password="pw";
};
Then I set the settings in server.properties the following way:
inter.broker.listener.name=INTERNAL
sasl.mechanism.inter.broker.protocol=PLAIN
listener.security.protocol.map=INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_SSL
listeners=INTERNAL://0.0.0.0:9092,EXTERNAL://0.0.0.0:19092
sasl.enabled.mechanisms=PLAIN,OAUTHBEARER
listener.name.external.oauthbearer.sasl.server.callback.handler.class=my.module.kafka.security.oauthbearer.OauthAuthenticateValidatorCallbackHandler
listener.name.external.oauthbearer.sasl.login.callback.handler.class=my.module.kafka.security.oauthbearer.OauthAuthenticateLoginCallbackHandler
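For completeness, the broker still has to be pointed at the JAAS file when it starts, via the standard java.security.auth.login.config system property (a sketch; the file path is a placeholder):

export KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf"
bin/kafka-server-start.sh config/server.properties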
When you do it this way, you won't get your error. Sadly, I get another error when the broker tries to set up the external connection:
javax.security.auth.callback.UnsupportedCallbackException: Unrecognized SASL Login callback
at org.apache.kafka.common.security.authenticator.AbstractLogin$DefaultLoginCallbackHandler.handle(AbstractLogin.java:105)
at org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.identifyToken(OAuthBearerLoginModule.java:316)
at org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.login(OAuthBearerLoginModule.java:301)
... 32 more
It seems like the Kafka brokers are ignoring the oauthbearer callback handler. This is a bit strange, because the external listener works perfectly when I configure it as the only listener.
I hope this helps you with your problem!
Is there any Node module for the Sails.js framework to obtain an SSL certificate using Let's Encrypt?
There is a middleware that enables the HTTP->HTTPS redirect and also handles the ACME validation requests from Let's Encrypt. As far as I can tell, it does not actually trigger the renewal, nor write anything, but I believe the ACME scripts handle that as cron jobs every 3 months or so, allowing your app to validate automatically when they run. I haven't implemented this myself yet, though (a sketch of the redirect idea follows below).
I would also ask you to seriously consider using CloudFlare or some other SSL-termination service, as that also gives you a lot of other benefits, like DDoS protection and some CDN features.
Docs: sailshq/lifejacket
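To illustrate the idea (a generic Express-style sketch, not the lifejacket API): redirect plain HTTP to HTTPS while leaving Let's Encrypt's ACME validation requests untouched:

// Hypothetical middleware sketch; mount it on the plain-HTTP server.
module.exports = function redirectToHttps(req, res, next) {
  // ACME http-01 challenges are fetched over plain HTTP from this
  // well-known path, so they must not be redirected.
  if (req.url.indexOf('/.well-known/acme-challenge/') === 0) {
    return next();
  }
  // Respect upstream SSL termination (e.g. CloudFlare or nginx).
  if (req.secure || req.headers['x-forwarded-proto'] === 'https') {
    return next();
  }
  return res.redirect(301, 'https://' + req.headers.host + req.url);
};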
As has been mentioned, you should consider the best overall solution in terms of CloudFlare or SSL offload via nginx etc.
However, you can use greenlock-express.js to achieve SSL with Let's Encrypt directly within the Sails Node environment.
The example below:
- configures an HTTP Express app using greenlock on port 80 that handles the redirects to HTTPS and the Let's Encrypt business logic;
- uses the greenlock SSL configuration to configure the primary Sails app as HTTPS on port 443.
Sample configuration for config/local.js:
// returns an instance of greenlock.js with additional helper methods
var glx = require('greenlock-express').create({
server: 'https://acme-v02.api.letsencrypt.org/directory'
, version: 'draft-11' // Let's Encrypt v2 (ACME v2)
, telemetry: true
, servername: 'domainname.com'
, configDir: '/tmp/acme/'
, email: 'myemail@somewhere.com'
, agreeTos: true
, communityMember: true
, approveDomains: [ 'domainname.com', 'www.domainname.com' ]
, debug: true
});
// handles acme-challenge and redirects to https
require('http').createServer(glx.middleware(require('redirect-https')())).listen(80, function () {
console.log("Listening for ACME http-01 challenges on", this.address());
});
module.exports = {
port: 443,
ssl: true,
http: {
serverOptions: glx.httpsOptions,
},
};
Refer to the greenlock documentation for fine-tuning the configuration, but the above gets an out-of-the-box Let's Encrypt setup working with Sails.
Note also that you may wish to place this configuration somewhere like config/env/production.js, as appropriate.