I'm studying Kafka with Schema Registry.
I registered the schema under the users-value subject (for topic users) directly in Schema Registry, and then I use the Avro Gradle plugin (davidmc24) to generate the classes.
When I send a request, the following error occurs:
servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed; nested exception is org.apache.kafka.common.errors.SerializationException: Error retrieving Avro schema{"type":"record","name":"User","namespace":"io.confluent.developer","fields":[{"name":"name","type":{"type":"string","avro.java.string":"String"},"avro.java.string":"String"},{"name":"age","type":"int"}]}]
I don't know what the problem is.
Can anyone help me, please?
Below is my code.
schema
It is also in src/main/avro/sample.avsc:
{
  "namespace": "io.confluent.developer",
  "type": "record",
  "name": "User",
  "fields": [
    {
      "name": "name",
      "type": "string",
      "avro.java.string": "String"
    },
    {
      "name": "age",
      "type": "int"
    }
  ]
}
application
@SpringBootApplication
class Application

fun main(args: Array<String>) {
    runApplication<Application>(*args)
}
consumer
@Service
class Consumer(
    private val logger: Logger = LoggerFactory.getLogger(Consumer::class.java)
) {
    @KafkaListener(topics = ["users"], groupId = "group_id")
    @Throws(IOException::class)
    fun consume(message: ConsumerRecord<String, User>?) {
        if (message != null) {
            logger.info(String.format("#### -> Consumed message -> %s", message.value()))
        }
    }
}
controller
@RestController
@RequestMapping(value = ["/kafka"])
class KafkaController @Autowired internal constructor(private val producer: Producer) {

    @PostMapping(value = ["/publish"])
    fun sendMessageToKafkaTopic(@RequestParam("name") name: String, @RequestParam("age") age: Int) {
        producer.sendMessage(User(name, age))
    }
}
producer
@Service
class Producer(
    private val kafkaTemplate: KafkaTemplate<String, User>? = null,
    @Value("\${topic.name}")
    private val TOPIC: String? = null
) {
    fun sendMessage(user: User) {
        this.kafkaTemplate?.send(this.TOPIC!!, user.getName(), user)
        logger.info(String.format("Produced user -> %s", user))
    }

    companion object {
        private val logger: Logger = LoggerFactory.getLogger(Producer::class.java)
    }
}
application.yml
server:
  port: 9000

spring:
  kafka:
    bootstrap-servers:
      - localhost:9092
    properties:
      schema.registry.url: http://localhost:8085
      auto:
        register:
          schemas: false
    consumer:
      group-id: group_id
      auto-offset-reset: earliest
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: io.confluent.kafka.serializers.KafkaAvroDeserializer
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: io.confluent.kafka.serializers.KafkaAvroSerializer

topic:
  name: users
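For reference, my understanding is that anything under spring.kafka.properties is passed straight through to the Confluent serializer and deserializer. As far as I know, the consumer side also needs specific.avro.reader: true in that properties block so the @KafkaListener receives User instances instead of GenericRecord. A rough sketch of the same producer settings expressed as an explicit Kotlin configuration class (the class and bean names here are hypothetical; the YAML above is what I actually use):

import io.confluent.developer.User
import io.confluent.kafka.serializers.KafkaAvroSerializer
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.StringSerializer
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.Configuration
import org.springframework.kafka.core.DefaultKafkaProducerFactory
import org.springframework.kafka.core.KafkaTemplate
import org.springframework.kafka.core.ProducerFactory

@Configuration
class KafkaProducerConfig {

    @Bean
    fun producerFactory(): ProducerFactory<String, User> = DefaultKafkaProducerFactory(
        mapOf(
            ProducerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092",
            ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG to StringSerializer::class.java,
            ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG to KafkaAvroSerializer::class.java,
            // Same Confluent properties as under spring.kafka.properties above.
            "schema.registry.url" to "http://localhost:8085",
            "auto.register.schemas" to false
        )
    )

    @Bean
    fun kafkaTemplate(producerFactory: ProducerFactory<String, User>): KafkaTemplate<String, User> =
        KafkaTemplate(producerFactory)
}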
build.gradle
buildscript {
    repositories {
        maven {
            url "https://plugins.gradle.org/m2/"
        }
        maven {
            url "https://packages.confluent.io/maven/"
        }
        maven {
            url = uri("https://jitpack.io")
        }
    }
}

plugins {
    id "com.github.davidmc24.gradle.plugin.avro" version "1.2.0"
    id "com.github.imflog.kafka-schema-registry-gradle-plugin" version "1.6.0"
}

apply plugin: 'kotlin-jpa'
apply plugin: 'org.springframework.boot'
apply plugin: 'kotlin-allopen'
apply plugin: "com.github.imflog.kafka-schema-registry-gradle-plugin"

group = 'com.example.exercise'
version = '0.0.1-alpha.kafka'

configurations {
    developmentOnly
    runtimeClasspath {
        extendsFrom developmentOnly
    }
}

processResources {
    def props = ['version': project.properties.get('version')]
    expand(props)
}

allOpen {
    annotation 'javax.persistence.Entity'
}

ext {
    avroVersion = "1.11.0"
}

dependencies {
    implementation 'org.springframework.boot:spring-boot-starter-validation'
    implementation 'org.springframework.kafka:spring-kafka:2.8.5'
    implementation "org.apache.avro:avro:${avroVersion}"
    implementation 'io.confluent:kafka-avro-serializer:7.1.1'
    implementation 'org.springframework.boot:spring-boot-starter-web'
    developmentOnly 'org.springframework.boot:spring-boot-devtools'
    testImplementation 'org.springframework.boot:spring-boot-starter-test'
}

schemaRegistry {
    url = 'http://registry-url:8085/'
    quiet = true
}
docker-compose.yml
version: '2'
services:
  kafka:
    image: wurstmeister/kafka
    container_name: kafka-exercise
    ports:
      - "9092:9092"
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=127.0.0.1
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_LISTENERS=PLAINTEXT://:29092,EXTERNAL://:9092
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:29092,EXTERNAL://localhost:9092
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT
      - KAFKA_SCHEMA_REGISTRY_URL=schemaregistry:8085
    depends_on:
      - zookeeper
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper-exercise
    ports:
      - "2181:2181"
    environment:
      - KAFKA_ADVERTISED_HOST_NAME=zookeeper
  schemaregistry:
    image: confluentinc/cp-schema-registry:6.2.0
    restart: always
    container_name: registry-exercise
    depends_on:
      - zookeeper
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: "zookeeper:2181"
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry
      SCHEMA_REGISTRY_LISTENERS: "http://0.0.0.0:8085"
    ports:
      - 8085:8085
  kafka-ui:
    image: provectuslabs/kafka-ui
    container_name: kafka-ui-exercise
    depends_on:
      - kafka
      - zookeeper
      - schemaregistry
    ports:
      - "8083:8080"
    restart: always
    environment:
      - KAFKA_CLUSTERS_0_NAME=local
      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:29092
      - KAFKA_CLUSTERS_0_ZOOKEEPER=zookeeper:2181
      - KAFKA_CLUSTERS_0_SCHEMAREGISTRY=http://schemaregistry:8085
This is the whole code that I use.
P.S. If I comment out the auto.register.schemas: false part, it runs, but it registers a new schema.
How can I make it work without commenting out auto.register.schemas: false?
I changed the Avro file; the problem was the Avro schema.
{
  "type": "record",
  "name": "User",
  "namespace": "io.confluent.developer",
  "fields": [
    {
      "name": "name",
      "type": {
        "type": "string",
        "avro.java.string": "String"
      },
      "avro.java.string": "String"
    },
    {
      "name": "age",
      "type": "int"
    }
  ]
}
Now it works!
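For anyone hitting the same thing: my understanding is that with auto.register.schemas: false the KafkaAvroSerializer only looks up the schema of the generated User class under the users-value subject, and the davidmc24 plugin generates that class with stringType "String" by default, so the generated schema carries the avro.java.string property and no longer matched what I had originally registered. A small sketch (assuming the kafka-schema-registry-client that comes with kafka-avro-serializer) to check that the generated schema matches the registered one before disabling auto-registration:

import io.confluent.developer.User
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient
import org.apache.avro.Schema

fun main() {
    // Registry from docker-compose, port 8085 as in application.yml.
    val client = CachedSchemaRegistryClient("http://localhost:8085", 10)

    // Schema baked into the class generated by the Avro Gradle plugin.
    val generated: Schema = User.getClassSchema()

    // Latest schema registered under the subject the serializer queries.
    val registered: Schema =
        Schema.Parser().parse(client.getLatestSchemaMetadata("users-value").schema)

    // With auto.register.schemas=false these must match, otherwise the
    // serializer fails with "Error retrieving Avro schema".
    println("schemas match: ${generated == registered}")
}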
Related
I have recently started using Ktor and got stuck at the very beginning.
I have a very simple response, whose content could look like this:
{
  "result": true,
  "data": [
    {
      "Name": "Danish",
      "Credit": "80"
    },
    {
      "Name": "Kumar",
      "Credit": "310"
    }
  ]
}
Or it could be like this:
{
  "result": false,
  "data": [],
  "message": "No data available, use default user",
  "default": [
    {
      "Name": "Default User",
      "Credit": "100"
    }
  ]
}
And my response classes look like this:
@Serializable
data class UserResponse(
    @SerialName("result") var result: Boolean? = null,
    @SerialName("data") var data: ArrayList<User?>? = null,
    @SerialName("message") var message: String? = null,
    @SerialName("default") var default: ArrayList<User?>? = null
)

@Serializable
data class User(
    @SerialName("Name") var name: String? = null,
    @SerialName("Credit") var credit: String? = null,
)
io.ktor.client.call.NoTransformationFoundException: No transformation found: class io.ktor.utils.io.ByteBufferChannel
I am getting a NoTransformationFoundException. I think it could be because the data array is empty, but how do I fix this?
According to this, we can catch the exception, but I can't do that, because I need the other data from the response.
The exception looks like you haven't installed the JSON content negotiation plugin when creating the Ktor client. It should be like this:
val httpClient = HttpClient {
    install(ContentNegotiation) {
        json()
    }
}
Then you can use this client like this:
val response: UserResponse = httpClient.get("URL").body()
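Once deserialization works, the empty data array is not a problem because of the nullable defaults in UserResponse. If it helps, a small sketch (reusing the client above; "URL" is a placeholder) of handling both response shapes by falling back to the default list when data is empty:

import io.ktor.client.HttpClient
import io.ktor.client.call.body
import io.ktor.client.request.get

suspend fun fetchUsers(httpClient: HttpClient): List<User> {
    val response: UserResponse = httpClient.get("URL").body()
    // Use "data" when it has entries, otherwise fall back to "default".
    val users = response.data.orEmpty().filterNotNull()
    return users.ifEmpty { response.default.orEmpty().filterNotNull() }
}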
I'm using a Kinesis delivery stream to send data from EventBridge to an S3 bucket, but I can't seem to find which class has the option to configure dynamic partitioning.
This is my code for the delivery stream:
new CfnDeliveryStream(this, `Export-delivery-stream`, {
  s3DestinationConfiguration: {
    bucketArn: bucket.bucketArn,
    roleArn: kinesisFirehoseRole.roleArn,
    prefix: `test/!{timestamp:yyyy/MM/dd}/`
  }
});
I have been working on the same issue for a few days and have finally gotten something to work. Here is an example of how it can be implemented in CDK. In short, the partitioning has to be enabled, as you have done, but you also need to set the partition key and the jq expression in the so-called processingConfiguration.
Our incoming JSON data looks something like this:
{
  "data": {
    "timestamp": 1633521266990,
    "defaultTopic": "Topic",
    "data": {
      "OUT1": "Inactive",
      "Current_mA": 3.92
    }
  }
}
The CDK code looks as follows:
const DeliveryStream = new CfnDeliveryStream(this, 'deliverystream', {
  deliveryStreamName: 'deliverystream',
  extendedS3DestinationConfiguration: {
    cloudWatchLoggingOptions: {
      enabled: true,
    },
    bucketArn: Bucket.bucketArn,
    roleArn: deliveryStreamRole.roleArn,
    prefix: 'defaultTopic=!{partitionKeyFromQuery:defaultTopic}/!{timestamp:yyyy/MM/dd}/',
    errorOutputPrefix: 'error/!{firehose:error-output-type}/',
    bufferingHints: {
      intervalInSeconds: 60,
    },
    dynamicPartitioningConfiguration: {
      enabled: true,
    },
    processingConfiguration: {
      enabled: true,
      processors: [
        {
          type: 'MetadataExtraction',
          parameters: [
            {
              parameterName: 'MetadataExtractionQuery',
              parameterValue: '{defaultTopic: .data.defaultTopic}',
            },
            {
              parameterName: 'JsonParsingEngine',
              parameterValue: 'JQ-1.6',
            },
          ],
        },
        {
          type: 'AppendDelimiterToRecord',
          parameters: [
            {
              parameterName: 'Delimiter',
              parameterValue: '\\n',
            },
          ],
        },
      ],
    },
  },
})
I am trying to use Azure Resource Manager and Bicep to deploy an IoT Hub and a storage account. IoT Hub has a feature to store all messages in a storage account for archiving purposes. The IoT Hub should access the storage account with a user-assigned managed identity.
I would like to deploy all of this in a single ARM deployment written in Bicep. The problem is deploying the IoT Hub with a user-assigned identity and setting up the archive custom route. I get the error:
{
  "code": "DeploymentFailed",
  "message": "At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/DeployOperations for usage details.",
  "details": [
    {
      "code": "400140",
      "message": "endpointName:messageArchive, exceptionMessage:Invalid operation: Managed identity is not enabled for IotHub ... errorcode: IH400140."
    }
  ]
}
My Bicep file looks like this:
resource messageArchive 'Microsoft.Storage/storageAccounts@2021-04-01' = {
  name: 'messagearchive4631'
  location: resourceGroup().location
  kind: 'StorageV2'
  sku: {
    name: 'Standard_GRS'
  }
  properties: {
    accessTier: 'Hot'
    supportsHttpsTrafficOnly: true
  }
}

resource messageArchiveBlobService 'Microsoft.Storage/storageAccounts/blobServices@2021-04-01' = {
  name: 'default'
  parent: messageArchive

  resource messageArchiveContainer 'containers@2021-02-01' = {
    name: 'iot-test-4631-container'
    properties: {
      publicAccess: 'None'
    }
  }
}

resource iotIdentity 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = {
  name: 'iot-test-access-archive-4631'
  location: resourceGroup().location
}

resource iotAccesToStorage 'Microsoft.Authorization/roleAssignments@2020-08-01-preview' = {
  name: guid(extensionResourceId(messageArchive.id, messageArchive.type, 'iot-test-access-archive-4631'))
  scope: messageArchive
  properties: {
    roleDefinitionId: '/subscriptions/${subscription().subscriptionId}/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe'
    principalId: iotIdentity.properties.principalId
    description: 'Allow access for IoT Hub'
  }
}

resource iothub 'Microsoft.Devices/IotHubs@2021-03-31' = {
  name: 'iot-test-4631'
  location: resourceGroup().location
  sku: {
    name: 'B1'
    capacity: 1
  }
  identity: {
    type: 'UserAssigned'
    userAssignedIdentities: {
      '${iotIdentity.id}': {}
    }
  }
  dependsOn: [
    iotAccesToStorage
  ]
  properties: {
    features: 'None'
    eventHubEndpoints: {
      events: {
        retentionTimeInDays: 1
        partitionCount: 4
      }
    }
    routing: {
      endpoints: {
        storageContainers: [
          {
            name: 'messageArchive'
            endpointUri: 'https://messagearchive4631.blob.core.windows.net/'
            containerName: 'iot-test-4631-container'
            batchFrequencyInSeconds: 100
            maxChunkSizeInBytes: 104857600
            encoding: 'Avro'
            fileNameFormat: '{iothub}/{YYYY}/{MM}/{DD}/{HH}/{mm}_{partition}.avro'
            authenticationType: 'identityBased'
          }
        ]
      }
      routes: [
        {
          name: 'EventHub'
          source: 'DeviceMessages'
          endpointNames: [
            'events'
          ]
          isEnabled: true
        }
        {
          name: 'messageArchiveRoute'
          source: 'DeviceMessages'
          endpointNames: [
            'messageArchive'
          ]
          isEnabled: true
        }
      ]
      fallbackRoute: {
        source: 'DeviceMessages'
        endpointNames: [
          'events'
        ]
        isEnabled: true
      }
    }
  }
}
I tried removing this message routing block from the IoT Hub:
endpoints: {
  storageContainers: [
    {
      name: 'messageArchive'
      endpointUri: 'https://messagearchive4631.blob.core.windows.net/'
      containerName: 'iot-test-4631-container'
      batchFrequencyInSeconds: 100
      maxChunkSizeInBytes: 104857600
      encoding: 'Avro'
      fileNameFormat: '{iothub}/{YYYY}/{MM}/{DD}/{HH}/{mm}_{partition}.avro'
      authenticationType: 'identityBased'
    }
  ]
}
and deploying it once. This deployment works. If I then include the message routing block and deploy again, it works as expected.
Is it possible to do this in a single deployment?
I figured it out myself. I'm using a user-assigned managed identity, and therefore I was missing this in the IoT Hub endpoint storage container configuration:
authenticationType: 'identityBased'
identity: {
  userAssignedIdentity: iotIdentity.id
}
The complete IoT Hub endpoint configuration looks like this:
endpoints: {
  storageContainers: [
    {
      name: 'RawDataStore'
      endpointUri: 'https://${nameRawDataStore}.blob.${environment().suffixes.storage}/'
      containerName: nameIotHub
      batchFrequencyInSeconds: 100
      maxChunkSizeInBytes: 104857600
      encoding: 'Avro'
      fileNameFormat: '{iothub}/{YYYY}/{MM}/{DD}/{HH}/{mm}_{partition}.avro'
      authenticationType: 'identityBased'
      identity: {
        userAssignedIdentity: iotIdentity.id
      }
    }
  ]
}
I am trying to install rabbitmq:8.6.1 from the Bitnami chart repository using terraform:0.12.18.
My Helm version is 3.4.2.
While installing, I am getting the following error:
Error: validation: chart.metadata is required
My Terraform file is as below:
resource "kubernetes_secret" "rabbitmq_load_definition" {
metadata {
name = "rabbitmq-load-definition"
namespace = kubernetes_namespace.kylas_sales.metadata[0].name
}
type = "Opaque"
data = {
"load_definition.json" = jsonencode({
"users": [
{
name: "sales",
tags: "administrator",
password: var.rabbitmq_password
}
],
"vhosts": [
{
name: "/"
}
],
"permissions": [
{
user: "sales",
vhost: "/",
configure: ".*",
write: ".*",
read: ".*"
}
],
"exchanges": [
{
name: "ex.iam",
vhost: "/",
type: "topic",
durable: true,
auto_delete: false,
internal: false,
arguments: {}
}
]
})
}
}
resource "helm_release" "rabbitmq" {
chart = "rabbitmq"
name = "rabbitmq"
version = "8.6.1"
timeout = 600
repository = "https://charts.bitnami.com/bitnami"
namespace = "sales"
depends_on = [
kubernetes_secret.rabbitmq_load_definition
]
}
After looking at issue 509 on terraform-provider-helm: if your module/subdirectory name is the same as your chart name (in my case the directory name is rabbitmq and my helm_release name is also rabbitmq), you get this error. I am still not able to identify why.
Solution: I changed my directory name from rabbitmq to rabbitmq-resource and the error is gone.
I have a RabbitMQ queue that is declared with the following options:
{
  "queue": "events/online",
  "durable": true,
  "args": {
    "x-max-priority": 10
  }
}
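(For context, a rough Kotlin sketch of the equivalent broker-side declaration, using the RabbitMQ Java client with assumed connection details; the Node-RED topology has to end up with arguments that match this declaration:)

import com.rabbitmq.client.ConnectionFactory

fun declareQueue() {
    val factory = ConnectionFactory().apply { host = "localhost" } // connection details assumed
    factory.newConnection().use { connection ->
        connection.createChannel().use { channel ->
            // durable = true, exclusive = false, autoDelete = false, plus the priority argument
            channel.queueDeclare("events/online", true, false, false, mapOf("x-max-priority" to 10))
        }
    }
}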
I am trying to connect to the queue from Node-RED, using the node-red-contrib-amqp plugin, with the following topology, which is set under the connection source:
{
  "queues": [
    {
      "name": "events/online",
      "durable": true,
      "options": {
        "x-max-priority": 10
      }
    }
  ]
}
I am getting the following error:
"AMQP input node disconnect error: Operation failed: QueueDeclare; 406
(PRECONDITION-FAILED) with message "PRECONDITION_FAILED - inequivalent
arg 'x-max-priority' for queue 'myqueue' in vhost 'vhost': received
none but current is the value '10' of type 'signedint'""
Turns out the answer is as follows.
Make sure the checkbox "use AMQP topology definition (JSON) defined below" is selected, and specify the priority with maxPriority (rather than x-max-priority) in the queue options:
{
  "queues": [
    {
      "name": "events/online",
      "durable": true,
      "options": {
        "maxPriority": 10
      }
    }
  ]
}