I'm using the npm package named Lien for my web server, and my certificate is located as shown in the image below. But I always get this error:
TypeError [ERR_INVALID_ARG_TYPE]: The "path" argument must be of type string or an instance of Buffer or URL. Received undefined
This is how Lien handles SSL:
if (options.ssl) {
  options.ssl._key = options.ssl._key || fs.readFileSync(options.ssl.key);
  options.ssl._cert = options.ssl._cert || fs.readFileSync(options.ssl.cert);
  _this3.server = https.createServer({
    key: options.ssl._key,
    cert: options.ssl._cert
  }, _this3.app);
} else {
  _this3.server = http.createServer(_this3.app);
}
And this is how I have defined Lien:
let server = new Lien({
  host: "localhost"
  , port: 5000,
  ssl: {
    cer: path.join(__dirname + '/' + 'certificate.cer'),
    key: path.join(__dirname + '/' + 'privatekey.txt')
  }
});
Image of the certificate location: https://i.imgur.com/95nhPis.png
That seems to be a typo -- it should be cert instead of cer:
let server = new Lien({
  host: "localhost"
  , port: 5000
  , ssl: {
    // v---- This has to be `cert`
    cert: path.join(__dirname + '/' + 'certificate.cer'),
    key: path.join(__dirname + '/' + 'privatekey.txt')
  }
});
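If the error persists after that change, it may also be worth confirming that both files actually resolve from __dirname, since the Lien code quoted above passes the values straight to fs.readFileSync(). A minimal check, using the file names from the question:

const fs = require("fs");
const path = require("path");

// Quick sanity check that the paths Lien will read actually exist.
const certPath = path.join(__dirname, "certificate.cer");
const keyPath = path.join(__dirname, "privatekey.txt");
console.log(certPath, fs.existsSync(certPath));
console.log(keyPath, fs.existsSync(keyPath));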
I have a wildcard SSL certificate in my Key Vault. I've got multiple hostnames that need to use the wildcard SSL certificate. I want to create 3 HTTPS listeners in my Application Gateway, one for each of the different hostnames (hostname1, hostname2 & hostname3). I can use the same certificate for all 3 HTTPS listeners if I create the listeners manually in the Azure Portal, but once I try to do it via Terraform, it gives me a duplicate SSL certificate error.
app_gateway.tf
http_listener {
  frontend_ip_configuration_name = "AppGWPublicFrontendIP"
  frontend_port_name = "fp-443"
  host_names = ["${var.ENV}.hostname1.company.com"]
  name = "fl-hostname-https-443"
  protocol = "Https"
  ssl_certificate_name = "star.company.com-cert"
}
http_listener {
  frontend_ip_configuration_name = "AppGWPublicFrontendIP"
  frontend_port_name = "fp-443"
  host_names = ["${var.ENV}.hostname2.company.com"]
  name = "fl-hostname2-https-443"
  protocol = "Https"
  ssl_certificate_name = "star.company.com-cert"
}
http_listener {
  frontend_ip_configuration_name = "AppGWPublicFrontendIP"
  frontend_port_name = "fp-443"
  host_names = ["${var.ENV}.hostname3.company.com"]
  name = "fl-hostname3-https-443"
  protocol = "Https"
  ssl_certificate_name = "star.company.com-cert"
}
ssl_certificate {
  name = "star.company.com-cert"
  key_vault_secret_id = "https://keyvault.vault.azure.net/certificates/star-company-com/${var.certificate_secret_id}"
}
Error:
│ Error: updating Application Gateway: (Name "AppGateway_Name" / Resource Group "ResourceGroup_Name"): network.ApplicationGatewaysClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="ApplicationGatewayDuplicateSslCertificate" Message="Application Gateway /subscriptions/00000000-0000-0000-0000-0000000000/resourceGroups/ResourceGroup_Name/providers/Microsoft.Network/applicationGateways/AppGateway_Name cannot have same certificate used across two Ssl Certificate elements. Certificate for /subscriptions/00000000-0000-0000-0000-0000000000/resourceGroups/ResourceGroup_Name/providers/Microsoft.Network/applicationGateways/AppGateway_Name/sslCertificates/cert-***-env-hostname-cert and /subscriptions/00000000-0000-0000-0000-0000000000/resourceGroups/ResourceGroup_Name/providers/Microsoft.Network/applicationGateways/AppGateway_Name/sslCertificates/cert-***-env-hostname-cert are same." Details=[]
You are declaring two SSL certificate elements that use the same underlying certificate, which is what the error is telling you. The certificate only needs to be declared once in the Application Gateway and then referenced by name in all three HTTP listeners, since you're using the same certificate for all of them.
Here is an updated code sample:
resource "azurerm_key_vault_certificate" "ssl_cert" {
name = "star.company.com-cert"
key_vault_id = azurerm_key_vault.example.id
}
resource "azurerm_application_gateway_http_listener" "http_listeners" {
for_each = {
"hostname1.company.com" = "fl-hostname-https-443",
"hostname2.company.com" = "fl-hostname2-https-443",
"hostname3.company.com" = "fl-hostname3-https-443",
}
name = each.value
resource_group_name = azurerm_resource_group.example.name
application_gateway_name = azurerm_application_gateway.example.name
frontend_ip_configuration_name = azurerm_application_gateway_public_ip.example.name
frontend_port_name = azurerm_application_gateway_frontend_port.example.name
protocol = "Https"
ssl_certificate_name = azurerm_key_vault_certificate.ssl_cert.name
host_names = [each.key]
}
To retrieve the certificate using a data source instead, consider the following code:
data "azurerm_key_vault_certificate" "star-company-com" {
name = "star-company-com"
key_vault_id = "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.KeyVault/vaults/<key-vault-name>"
}
resource "azurerm_application_gateway" "example" {
# ...
frontend_port {
name = "https"
port = 443
}
frontend_ip_configuration {
name = "PublicIPAddress"
public_ip_address_id = azurerm_public_ip.example.id
private_ip_address_allocation = "Dynamic"
}
ssl_certificate {
name = "star-company-com"
data = data.azurerm_key_vault_certificate.star-company-com.certificate_data
password = "MyCertPassword"
}
http_listener {
name = "listener1"
frontend_ip_configuration_name = azurerm_application_gateway.example.frontend_ip_configuration[0].name
frontend_port_name = azurerm_application_gateway.example.frontend_port[0].name
protocol = "Https"
ssl_certificate_name = "star-company-com"
}
# ...
}
I'm facing some issues while dealing with certificates in Terraform.
Before writing the code below, I had already made a CSR request.
I should mention that certificate_pem and private_key are both base64-encoded; in particular, private_key is encrypted.
In the code below, I would like to use private_key and certificate_pem.
resource "kubernetes_secret" "my-secret" {
data = {
"tls.crt" = data.my_data.my-configuration-secret.data["certificate_pem"]
"tls.key" = data.my_data.my-configuration-secret.data["private_key"]
}
metadata {
name = "my-secret"
namespace = "my-namespace"
}
}
Now, in the Ingress resource, I use this secret name:
resource "kubernetes_ingress" "my-sni" {
metadata {
name = "my-sni"
namespace = "my_namespace"
annotations = {
"kubernetes.io/ingress.class" = "my_namespace"
"kubernetes.io/ingress.allow-http" = "true"
"nginx.ingress.kubernetes.io/ssl-redirect" = "false"
"nginx.ingress.kubernetes.io/force-ssl-redirect" = "false"
"nginx.ingress.kubernetes.io/ssl-passthrough" = "false"
"nginx.ingress.kubernetes.io/secure-backends" = "false"
"nginx.ingress.kubernetes.io/proxy-body-size" = "0"
"nginx.ingress.kubernetes.io/proxy-read-timeout" = "3600000"
"nginx.ingress.kubernetes.io/rewrite-target" = "/$1"
"nginx.ingress.kubernetes.io/proxy-send-timeout" = "400000"
"nginx.ingress.kubernetes.io/backend-protocol" = "HTTP"
}
}
spec {
tls {
hosts = ["my_host"]
secret_name = "my-secret"
}
rule {
host = "my_host"
http {
path {
path = "/?(.*)"
backend {
service_name = "my-service"
service_port = 8080
}
}
}
}
}
}
Everything is fine with terraform apply, but I can't reach the host to check whether I can access the microservice.
Someone told me I have to decrypt the private_key.
I don't know how to do that.
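One thing worth checking, assuming the stored values really are base64-encoded PEM strings as described: the Terraform kubernetes provider expects plain text in the data map and base64-encodes it itself, so already-encoded values would end up double-encoded unless they are run through base64decode(). Also, an NGINX ingress cannot use an encrypted private key (it has no way to supply the passphrase), which is probably what "decrypt the private_key" refers to; the key would have to be decrypted outside Terraform (e.g. with openssl) before being stored. A minimal sketch of the secret under those assumptions:

resource "kubernetes_secret" "my-secret" {
  # Sketch only: decode the base64 values so the provider stores valid PEM,
  # and assume the private key has already been decrypted outside Terraform.
  data = {
    "tls.crt" = base64decode(data.my_data.my-configuration-secret.data["certificate_pem"])
    "tls.key" = base64decode(data.my_data.my-configuration-secret.data["private_key"])
  }

  # kubernetes.io/tls is the conventional secret type for certificate/key pairs.
  type = "kubernetes.io/tls"

  metadata {
    name = "my-secret"
    namespace = "my-namespace"
  }
}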
I am trying to set up the electron auto-updater with an Amazon S3 bucket. I don't get any errors, but when I publish a new version, the auto-updater doesn't show any updates on the app screen, even though the latest published version shows up in the S3 bucket. Below is how it's added:
require('dotenv').config({path: __dirname + '/.env'});
const aws4 = require('aws4');
const path = require('path'); // needed for path.join() below
const pkg = require('./package.json');
const {app, BrowserWindow, Menu, protocol, ipcMain} = require('electron');
const log = require('electron-log');
const {autoUpdater} = require("electron-updater");

autoUpdater.on('checking-for-update', () => {
  alert('checking')
  console.log('checking for updates')
  const opts = {
    service: 's3',
    region: pkg.build.publish.region,
    method: 'GET',
    host: `s3-${pkg.build.publish.region}.amazonaws.com`,
    path: path.join('/', pkg.build.publish.bucket, latest_yml_path)
  };
  aws4.sign(opts, {
    accessKeyId: 'access key',
    secretAccessKey: 'secret access key'
  });
  // signer.sign(opts); --remove this line --
  autoUpdater.requestHeaders = opts.headers
  document.getElementById('messages').innerText = "checking for updates"
  sendStatusToWindow('Checking for update...');
})
autoUpdater.on('update-available', (info) => {
  alert('update available')
  sendStatusToWindow('Update available.');
})
autoUpdater.on('update-not-available', (info) => {
  sendStatusToWindow('Update not available.');
})
autoUpdater.on('error', (err) => {
  sendStatusToWindow('Error in auto-updater. ' + err);
})
autoUpdater.on('download-progress', (progressObj) => {
  let log_message = "Download speed: " + progressObj.bytesPerSecond;
  log_message = log_message + ' - Downloaded ' + progressObj.percent + '%';
  log_message = log_message + ' (' + progressObj.transferred + "/" + progressObj.total + ')';
  sendStatusToWindow(log_message);
})
autoUpdater.on('update-downloaded', (info) => {
  sendStatusToWindow('Update downloaded');
});
app.on('ready', function() {
  // Create the Menu
  const menu = Menu.buildFromTemplate(template);
  Menu.setApplicationMenu(menu);
  console.log('ready')
  createDefaultWindow();
  autoUpdater.checkForUpdatesAndNotify();
});
No error shows up, but no messages show up either. Where is it possibly going wrong?
I can suggest some tips.
Check that opts.headers is correct (if the request URL signed by aws4 is not right, the update check will not work).
Call setFeedURL after setting the autoUpdater headers (see the sketch after these tips).
When downloading the update package, change the autoUpdater headers (and the feed URL) to point at the update package path, because the headers you set while checking for updates were signed for the yaml file path.
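As a rough illustration of the second tip, a sketch along these lines might work; it assumes the same pkg.build.publish settings as in the question, signs a placeholder latest.yml path, and reads the AWS credentials from placeholder environment variables:

// Sketch only: sign the request for the update metadata, hand the headers to the
// updater, then point it at the S3 feed before checking for updates.
const aws4 = require('aws4');
const pkg = require('./package.json');
const {autoUpdater} = require('electron-updater');

const opts = {
  service: 's3',
  region: pkg.build.publish.region,
  method: 'GET',
  host: `s3-${pkg.build.publish.region}.amazonaws.com`,
  path: `/${pkg.build.publish.bucket}/latest.yml`  // placeholder path to the update metadata
};
aws4.sign(opts, {
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,      // placeholder env var names
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
});

autoUpdater.requestHeaders = opts.headers;  // 1. headers first
autoUpdater.setFeedURL({                    // 2. then the feed
  provider: 's3',
  bucket: pkg.build.publish.bucket,
  region: pkg.build.publish.region
});
autoUpdater.checkForUpdatesAndNotify();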
This is the code I currently use on my website:
var client = null;
var device_is_on = null;
var hostname = "********";
var port = "8003";
var clientId = "mqtt_js_" + parseInt(Math.random() * 100000, 10);
var device_topic = "stat/Device_001/POWER";
var status_topic = "cmnd/Device_001/power";
function connect(){
  client = new Paho.MQTT.Client(hostname, Number(port), clientId);
  client.onConnectionLost = onConnectionLost;
  client.onMessageArrived = onMessageArrived;
  var options = {
    useSSL: true,
    userName: "***",
    password: "********",
    onSuccess: onConnect,
    onFailure: onFail
  };
  client.connect(options);
}
function onConnect(context) {
  options = {qos: 0}
  client.subscribe(device_topic, options);
  client.subscribe(status_topic, options);
  var payloadd = "6";
  message = new Paho.MQTT.Message(payloadd);
  message.destinationName = status_topic;
  message.retained = true;
  client.send(message);
}
function onFail(context) {
}
function onConnectionLost(responseObject) {
  if (responseObject.errorCode !== 0) {
    window.alert("Connection Lost!\nPlease Refresh.");
  }
}
function onMessageArrived(message) {
  if (message.destinationName == device_topic){
    var temperature_heading = document.getElementById("device_display");
    temperature_heading.innerHTML = "Air Conditioner: " + message.payloadString;
    if (message.payloadString == "ON" || message.payloadString == "o"){
      device_is_on = true;
    } else {
      device_is_on = false;
    }
  }
}
function device_toggle(){
  if (device_is_on){
    var payload = "off";
    device_is_on = false;
  } else {
    var payload = "on";
    device_is_on = true;
  }
  message = new Paho.MQTT.Message(payload);
  message.destinationName = status_topic;
  message.retained = true;
  client.send(message);
}
What should I put in the "var options" section? Currently I am getting the error ERR_CERT_AUTHORITY_INVALID in the Google Chrome console.
Note 1: This code functions perfectly over http but I am converting to https.
Note 2: I use Mosquitto as my MQTT broker.
Help is much appreciated.
It looks like you are using a self-signed certificate. This will not be trusted by your browser, so it will not connect, raising the error you have shown.
You have 2 options:
Import the certificate into your browser and mark it as trusted (how you do this will vary depending on what browser you are using). This is only really useful for testing/development because normal users should not be importing random certificates as this opens them up to all kinds of security problems.
Get a real trusted certificate for your website and broker. The simplest/cheapest way to do this will be to use letsencrypt. You can then configure mosquitto to use this certificate (an example configuration follows below).
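For that second option, the broker then needs a TLS-enabled websockets listener that points at the issued certificate. A rough sketch of the relevant mosquitto.conf lines, assuming a certificate issued by Let's Encrypt for example.com under the default /etc/letsencrypt/live/ layout and the port 8003 used in the question:

# WebSockets listener with TLS (certificate paths are assumptions based on a
# default Let's Encrypt layout for example.com)
listener 8003
protocol websockets
cafile /etc/letsencrypt/live/example.com/chain.pem
certfile /etc/letsencrypt/live/example.com/cert.pem
keyfile /etc/letsencrypt/live/example.com/privkey.pem

Keep in mind that the mosquitto process needs read access to those files, which usually means adjusting permissions or copying them after each renewal.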
A TLS-capable JavaScript Paho client is available; see GitHub: paho.mqtt.javascript/issues/88.
Trying to use socketcluster to exchange events between browser windows.
On the sender side I have:
var options = {
  hostname: "myserver.com",
  secure: true,
  port: 443,
  connectTimeout: 86400000,
  autoReconnectOptions: {
    initialDelay: 100, // milliseconds
    randomness: 10,    // milliseconds
    multiplier: 1.5,   // decimal
    maxDelay: 60000    // milliseconds
  }
};

// Initiate the connection to the server
var socket = socketCluster.connect(options);

socket.on('connect', function () {
  console.log('CONNECTED');
});

function sendTime() {
  var currentDate = new Date();
  var theId = document.getElementById("object_id").value;
  count++;
  console.log("id " + theId);
  socket.emit('identity1', { timestamp: currentDate, id: theId, counter: count});
}
Then on the server I have the worker publish a new event:
socket.on('identity1', function (data) {
  count++;
  console.log('Handled identity1', data);
  scServer.exchange.publish('identity-' + data.id, data);
});
And on the receiver side I have:
// Initiate the connection to the server
var socket = socketCluster.connect(options);

socket.on('connect', function () {
  console.log('CONNECTED');
  identityChannel = socket.subscribe('identity-' + document.getElementById("object_id").value);
  identityChannel.watch(function (data) {
    var theTime = data.timestamp;
    console.log('ID:' + data.id + ' TIME: ' + theTime);
    document.getElementById("data2").innerHTML = 'TIME: ' + theTime + 'COUNTER : ' + data.counter;
  });
});
In the Chrome JS console I see that, after 10s, the client connection on both sides is refused like this:
socketcluster.js:678 Uncaught SocketProtocolError {name: "SocketProtocolError", message: "Socket hung up", code: 1006, stack: "SocketProtocolError: Socket hung up↵ at SCSocke…myserver.com/socketcluster.js:1392:10)"}
(anonymous) # socketcluster.js:678
setTimeout (async)
SCSocket._onSCError # socketcluster.js:676
SCSocket._onSCClose # socketcluster.js:781
(anonymous) # socketcluster.js:426
Emitter.emit # socketcluster.js:4152
SCTransport._onClose # socketcluster.js:1494
wsSocket.onclose # socketcluster.js:1392
sender.html:26 CONNECTED
I see that some events are lost when reconnecting.
Q: Is this normal?
Q: Can the 10s limit be tuned?
Actually, you have to set the GCP load balancer connection timeout to something other than its default value of 10s.
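If the traffic goes through a global external HTTP(S) load balancer, the setting in question is the backend service timeout, which also caps how long a WebSocket connection may stay open. A hedged example, where the backend service name is a placeholder:

# Raise the backend service timeout so long-lived WebSocket connections are not
# cut off; "my-backend-service" is a placeholder name.
gcloud compute backend-services update my-backend-service --global --timeout=86400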