How to deploy Azure IoT Hub storage account archiving in Bicep?

I'm trying to use Azure Resource Manager and Bicep to deploy an IoT Hub and a storage account. IoT Hub has a feature to store all messages in a storage account for archiving purposes. The IoT Hub should access the storage account with a user-assigned managed identity.
I would like to deploy all of this in a single ARM deployment written in Bicep. The problem is deploying the IoT Hub with a user-assigned identity and setting up the archive custom route. I get the error:
{
  "code": "DeploymentFailed",
  "message": "At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/DeployOperations for usage details.",
  "details": [
    {
      "code": "400140",
      "message": "endpointName:messageArchive, exceptionMessage:Invalid operation: Managed identity is not enabled for IotHub ... errorcode: IH400140."
    }
  ]
}
My Bicep file looks like this:
resource messageArchive 'Microsoft.Storage/storageAccounts@2021-04-01' = {
  name: 'messagearchive4631'
  location: resourceGroup().location
  kind: 'StorageV2'
  sku: {
    name: 'Standard_GRS'
  }
  properties: {
    accessTier: 'Hot'
    supportsHttpsTrafficOnly: true
  }
}

resource messageArchiveBlobService 'Microsoft.Storage/storageAccounts/blobServices@2021-04-01' = {
  name: 'default'
  parent: messageArchive

  resource messageArchiveContainer 'containers@2021-02-01' = {
    name: 'iot-test-4631-container'
    properties: {
      publicAccess: 'None'
    }
  }
}

resource iotIdentity 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = {
  name: 'iot-test-access-archive-4631'
  location: resourceGroup().location
}

resource iotAccesToStorage 'Microsoft.Authorization/roleAssignments@2020-08-01-preview' = {
  name: guid(extensionResourceId(messageArchive.id, messageArchive.type, 'iot-test-access-archive-4631'))
  scope: messageArchive
  properties: {
    roleDefinitionId: '/subscriptions/${subscription().subscriptionId}/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe'
    principalId: iotIdentity.properties.principalId
    description: 'Allow access for IoT Hub'
  }
}

resource iothub 'Microsoft.Devices/IotHubs@2021-03-31' = {
  name: 'iot-test-4631'
  location: resourceGroup().location
  sku: {
    name: 'B1'
    capacity: 1
  }
  identity: {
    type: 'UserAssigned'
    userAssignedIdentities: {
      '${iotIdentity.id}': {}
    }
  }
  dependsOn: [
    iotAccesToStorage
  ]
  properties: {
    features: 'None'
    eventHubEndpoints: {
      events: {
        retentionTimeInDays: 1
        partitionCount: 4
      }
    }
    routing: {
      endpoints: {
        storageContainers: [
          {
            name: 'messageArchive'
            endpointUri: 'https://messagearchive4631.blob.core.windows.net/'
            containerName: 'iot-test-4631-container'
            batchFrequencyInSeconds: 100
            maxChunkSizeInBytes: 104857600
            encoding: 'Avro'
            fileNameFormat: '{iothub}/{YYYY}/{MM}/{DD}/{HH}/{mm}_{partition}.avro'
            authenticationType: 'identityBased'
          }
        ]
      }
      routes: [
        {
          name: 'EventHub'
          source: 'DeviceMessages'
          endpointNames: [
            'events'
          ]
          isEnabled: true
        }
        {
          name: 'messageArchiveRoute'
          source: 'DeviceMessages'
          endpointNames: [
            'messageArchive'
          ]
          isEnabled: true
        }
      ]
      fallbackRoute: {
        source: 'DeviceMessages'
        endpointNames: [
          'events'
        ]
        isEnabled: true
      }
    }
  }
}
I tried removing this message routing block in the IoT Hub
endpoints: {
  storageContainers: [
    {
      name: 'messageArchive'
      endpointUri: 'https://messagearchive4631.blob.core.windows.net/'
      containerName: 'iot-test-4631-container'
      batchFrequencyInSeconds: 100
      maxChunkSizeInBytes: 104857600
      encoding: 'Avro'
      fileNameFormat: '{iothub}/{YYYY}/{MM}/{DD}/{HH}/{mm}_{partition}.avro'
      authenticationType: 'identityBased'
    }
  ]
}
and deploying it once. That deployment works. If I then add the routing block back and deploy again, it works as expected.
Is it possible to do this in a single deployment?

I figured it out myself. I'm using a user-assigned managed identity, and therefore I was missing this in the IoT Hub storage container endpoint configuration:
authenticationType: 'identityBased'
identity: {
  userAssignedIdentity: iotIdentity.id
}
The complete IoT Hub endpoint configuration looks like this:
endpoints: {
  storageContainers: [
    {
      name: 'RawDataStore'
      endpointUri: 'https://${nameRawDataStore}.blob.${environment().suffixes.storage}/'
      containerName: nameIotHub
      batchFrequencyInSeconds: 100
      maxChunkSizeInBytes: 104857600
      encoding: 'Avro'
      fileNameFormat: '{iothub}/{YYYY}/{MM}/{DD}/{HH}/{mm}_{partition}.avro'
      authenticationType: 'identityBased'
      identity: {
        userAssignedIdentity: iotIdentity.id
      }
    }
  ]
}

Related

Which class in AWS CDK has the option to configure dynamic partitioning for a Kinesis delivery stream?

I'm using a Kinesis delivery stream to send a stream from EventBridge to an S3 bucket, but I can't seem to find which class has the option to configure dynamic partitioning.
This is my code for the delivery stream:
new CfnDeliveryStream(this, `Export-delivery-stream`, {
  s3DestinationConfiguration: {
    bucketArn: bucket.bucketArn,
    roleArn: kinesisFirehoseRole.roleArn,
    prefix: `test/!{timestamp:yyyy/MM/dd}/`
  }
});
I have been working on the same issue for a few days and have finally gotten something to work. Here is an example of how it can be implemented in CDK. In short, the partitioning has to be enabled as you have done, but you also need to set the partition key and the .jq expression in the so-called processingConfiguration.
Our incoming JSON data looks something like this:
{
  "data": {
    "timestamp": 1633521266990,
    "defaultTopic": "Topic",
    "data": {
      "OUT1": "Inactive",
      "Current_mA": 3.92
    }
  }
}
The CDK code looks like the following:
const DeliveryStream = new CfnDeliveryStream(this, 'deliverystream', {
  deliveryStreamName: 'deliverystream',
  extendedS3DestinationConfiguration: {
    cloudWatchLoggingOptions: {
      enabled: true,
    },
    bucketArn: Bucket.bucketArn,
    roleArn: deliveryStreamRole.roleArn,
    prefix: 'defaultTopic=!{partitionKeyFromQuery:defaultTopic}/!{timestamp:yyyy/MM/dd}/',
    errorOutputPrefix: 'error/!{firehose:error-output-type}/',
    bufferingHints: {
      intervalInSeconds: 60,
    },
    dynamicPartitioningConfiguration: {
      enabled: true,
    },
    processingConfiguration: {
      enabled: true,
      processors: [
        {
          type: 'MetadataExtraction',
          parameters: [
            {
              parameterName: 'MetadataExtractionQuery',
              parameterValue: '{defaultTopic: .data.defaultTopic}',
            },
            {
              parameterName: 'JsonParsingEngine',
              parameterValue: 'JQ-1.6',
            },
          ],
        },
        {
          type: 'AppendDelimiterToRecord',
          parameters: [
            {
              parameterName: 'Delimiter',
              parameterValue: '\\n',
            },
          ],
        },
      ],
    },
  },
})
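For completeness, Bucket and deliveryStreamRole are assumed to already exist in the surrounding stack. A minimal sketch of how they might be defined (hypothetical construct IDs, CDK v2 module paths) so the snippet above has something to reference:
import * as s3 from 'aws-cdk-lib/aws-s3';
import * as iam from 'aws-cdk-lib/aws-iam';

// Hypothetical destination bucket and Firehose delivery role referenced above.
const Bucket = new s3.Bucket(this, 'DeliveryBucket');
const deliveryStreamRole = new iam.Role(this, 'DeliveryStreamRole', {
  assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'),
});
// Firehose needs permission to write objects into the destination bucket.
Bucket.grantReadWrite(deliveryStreamRole);
With a record like the sample payload, objects then land under keys such as defaultTopic=Topic/yyyy/MM/dd/, where the date part comes from the record's arrival time.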

Fastify and JSON schema validation

I'm trying to learn Fastify through the official documentation. I'm really interested in validating an incoming POST request with a JSON schema. Following the instructions, I added this to my routes:
fastify.addSchema({
  $id: 'http://example.com/',
  type: 'object',
  properties: {
    hello: { type: 'string' }
  }
})

fastify.post('/', {
  handler() { },
  schema: {
    body: {
      type: 'array',
      items: { $ref: 'http://example.com#/properties/hello' }
    }
  }
})
Now the problem is that I cannot write a JSON body that is accepted by this schema. From my basic understanding, a simple POST request like the following should be accepted:
[
  {
    "hello": "bye"
  },
  {
    "hello": "bye bye"
  }
]
However, the server keeps telling me that body[0] should be string. Where am I wrong?
The reference $ref: 'http://example.com#/properties/hello' points to the value of the hello property schema, which is { type: 'string' }.
This means the body schema in the fastify.post('/') route expects the body to be an array of strings, so a payload like ["bye", "bye bye"] validates, but an array of objects does not.
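If the goal is to accept an array of objects like the one in the question, one option (a sketch, not part of the original answer) is to reference the whole shared schema by its registered $id instead of a single property:
import Fastify from 'fastify'

const fastify = Fastify()

fastify.addSchema({
  $id: 'http://example.com/',
  type: 'object',
  properties: {
    hello: { type: 'string' }
  }
})

fastify.post('/', {
  handler: async () => ({ ok: true }),
  schema: {
    body: {
      type: 'array',
      // Reference the whole object schema rather than #/properties/hello,
      // so [{ "hello": "bye" }, { "hello": "bye bye" }] should validate.
      items: { $ref: 'http://example.com/' }
    }
  }
})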

Installing RabbitMQ using Helm 3 from Bitnami throws chart.metadata is required

I am trying to install rabbitmq:8.6.1 from the Bitnami chart repository using terraform:0.12.18.
My Helm version is 3.4.2.
While installing, I am getting the following error:
Error: validation: chart.metadata is required
My Terraform file is as below:
resource "kubernetes_secret" "rabbitmq_load_definition" {
metadata {
name = "rabbitmq-load-definition"
namespace = kubernetes_namespace.kylas_sales.metadata[0].name
}
type = "Opaque"
data = {
"load_definition.json" = jsonencode({
"users": [
{
name: "sales",
tags: "administrator",
password: var.rabbitmq_password
}
],
"vhosts": [
{
name: "/"
}
],
"permissions": [
{
user: "sales",
vhost: "/",
configure: ".*",
write: ".*",
read: ".*"
}
],
"exchanges": [
{
name: "ex.iam",
vhost: "/",
type: "topic",
durable: true,
auto_delete: false,
internal: false,
arguments: {}
}
]
})
}
}
resource "helm_release" "rabbitmq" {
chart = "rabbitmq"
name = "rabbitmq"
version = "8.6.1"
timeout = 600
repository = "https://charts.bitnami.com/bitnami"
namespace = "sales"
depends_on = [
kubernetes_secret.rabbitmq_load_definition
]
}
After looking at issue 509 on terraform-provider-helm: if your module/subdirectory name is the same as your chart name (in my case the directory name is rabbitmq and my helm_release name is also rabbitmq), you get this error. I am still not able to identify why.
Solution: I changed my directory name from rabbitmq to rabbitmq-resource and the error is gone.

How to set function.events.sns.arn in a serverless.ts file?

I am setting up a new Serverless TypeScript monorepo, using the aws-nodejs-typescript template. The template produces a serverless.ts file for configuration.
Adding an AWS SNS event listener for an existing SNS Topic is supported:
https://www.serverless.com/framework/docs/providers/aws/events/sns/
However, there is no corresponding attribute in the type definitions for the sns event:
TS2322: Type ‘{ arn: string; }’ is not assignable to type ‘Sns’. Object literal may only specify known properties, and ‘arn’ does not exist in type ‘Sns’.
awsProvider.d.ts(487, 9): The expected type comes from property ‘sns’ which is declared here on type ‘Event’
How can I configure an event for an existing SNS Topic using serverless.ts?
The answer is... type coercion! I had to downcast to unknown, then upcast to the Sns event type. Here's a minimal serverless.ts from my setup:
import type { Serverless, ApiGateway, Sns } from 'serverless/aws';

const serverlessConfiguration: Serverless = {
  service: {
    name: 'foo',
  },
  frameworkVersion: '2',
  custom: {
    webpack: {
      webpackConfig: './webpack.config.js',
      packager: 'yarn',
      includeModules: true,
    },
  },
  package: {
    individually: true,
  },
  plugins: ['serverless-webpack', 'serverless-jest-plugin'],
  provider: {
    name: 'aws',
    runtime: 'nodejs12.x',
    region: 'us-west-2',
    stage: "${opt:stage, 'dev'}",
    apiGateway: {
      minimumCompressionSize: 1024,
      shouldStartNameWithService: true,
    } as ApiGateway,
    environment: {
      AWS_NODEJS_CONNECTION_REUSE_ENABLED: '1',
    },
  },
  functions: {
    bar: {
      handler: 'src/baz.handler',
      timeout: 900,
      reservedConcurrency: 1,
      events: [
        {
          sns: ({
            arn: '${cf:another-stack.TopicArn}',
          } as unknown) as Sns,
        },
      ],
    },
  },
};

module.exports = serverlessConfiguration;

Logging in a hapi.js server with the good-http plugin issue

I'm using the good plugin for my app, and I have copied the config parameters straight from the sample on the page. The console and file reporters are working, but good-http is not working!
Here is my configuration:
myHTTPReporter: [
  {
    module: 'good-squeeze',
    name: 'Squeeze',
    args: [{ error: '*', log: 'error', request: 'error' }]
  },
  {
    module: 'good-http',
    args: [
      'http://localhost/log-request/index.php?test=23424', // <- test file I created to see if data is being sent or not
      {
        wreck: {
          headers: { 'x-api-key': 12345 }
        }
      }
    ]
  }
]
The only argument that is actually working is ops: *, and none of the error: *, ... arguments are working.
Am I missing something in my config parameters?
Maybe you should change the threshold parameter of the plugin. It is set to 20 by default, so it only sends data after 20 events. If you want immediate results, you have to change it to 0:
myHTTPReporter: [
  {
    module: 'good-squeeze',
    name: 'Squeeze',
    args: [{ error: '*', log: 'error', request: 'error' }]
  },
  {
    module: 'good-http',
    args: [
      'http://localhost/log-request/index.php?test=23424', // <- test file I created to see if data is being sent or not
      {
        threshold: 0,
        wreck: {
          headers: { 'x-api-key': 12345 }
        }
      }
    ]
  }
]