ActiveMQ Artemis colocated replication not working

I have an ActiveMQ Artemis cluster configured for colocated replication over the network.
Here are the two broker configurations:
Broker1:
<?xml version='1.0'?>
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xi="http://www.w3.org/2001/XInclude"
xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xmlns="urn:activemq:core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq:core ">
<name>10.1.1.130</name>
<persistence-enabled>true</persistence-enabled>
<!-- this could be ASYNCIO, MAPPED, NIO
ASYNCIO: Linux Libaio
MAPPED: mmap files
NIO: Plain Java Files
-->
<journal-type>ASYNCIO</journal-type>
<paging-directory>data/paging</paging-directory>
<bindings-directory>data/bindings</bindings-directory>
<journal-directory>data/journal</journal-directory>
<large-messages-directory>data/large-messages</large-messages-directory>
<journal-datasync>true</journal-datasync>
<journal-min-files>2</journal-min-files>
<journal-pool-files>10</journal-pool-files>
<journal-device-block-size>4096</journal-device-block-size>
<journal-file-size>10M</journal-file-size>
<journal-buffer-timeout>23940000</journal-buffer-timeout>
<journal-max-io>1</journal-max-io>
<!-- how often we are looking for how many bytes are being used on the disk in ms -->
<disk-scan-period>5000</disk-scan-period>
<!-- once the disk hits this limit the system will block, or close the connection in certain protocols
that won't support flow control. -->
<max-disk-usage>90</max-disk-usage>
<!-- should the broker detect dead locks and other issues -->
<critical-analyzer>true</critical-analyzer>
<critical-analyzer-timeout>120000</critical-analyzer-timeout>
<critical-analyzer-check-period>60000</critical-analyzer-check-period>
<critical-analyzer-policy>HALT</critical-analyzer-policy>
<!-- Clustering configuration -->
<connectors>
<connector name="netty-connector">tcp://localhost:61616</connector>
<!-- connector to the server2 -->
<connector name="server2-connector">tcp://10.1.1.131:61616</connector>
</connectors>
<ha-policy>
<replication>
<colocated>
<backup-request-retries>-1</backup-request-retries>
<backup-request-retry-interval>2000</backup-request-retry-interval>
<excludes>
<connector-ref>server2-connector</connector-ref>
<connector-ref>netty-connector</connector-ref>
</excludes>
<max-backups>1</max-backups>
<request-backup>true</request-backup>
<master>
</master>
<slave>
</slave>
</colocated>
</replication>
</ha-policy>
<cluster-user>ACTIVEMQ.CLUSTER.ADMIN.USER</cluster-user>
<cluster-password>123456</cluster-password>
<cluster-connections>
<cluster-connection name="my-cluster">
<connector-ref>netty-connector</connector-ref>
<retry-interval>500</retry-interval>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>STRICT</message-load-balancing>
<max-hops>1</max-hops>
<static-connectors>
<connector-ref>server2-connector</connector-ref>
</static-connectors>
</cluster-connection>
</cluster-connections>
<acceptors>
<!-- Acceptor for every supported protocol -->
<acceptor name="artemis">tcp://0.0.0.0:61616?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=CORE,AMQP,STOMP,HORNETQ,MQTT,OPENWIRE;useEpoll=true;amqpCredits=1000;amqpLowCredits=300</acceptor>
<!-- AMQP Acceptor. Listens on default AMQP port for AMQP traffic.-->
<acceptor name="amqp">tcp://0.0.0.0:5672?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=AMQP;useEpoll=true;amqpCredits=1000;amqpLowCredits=300</acceptor>
<!-- STOMP Acceptor. -->
<acceptor name="stomp">tcp://0.0.0.0:61613?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=STOMP;useEpoll=true</acceptor>
<!-- HornetQ Compatibility Acceptor. Enables HornetQ Core and STOMP for legacy HornetQ clients. -->
<acceptor name="hornetq">tcp://0.0.0.0:5445?anycastPrefix=jms.queue.;multicastPrefix=jms.topic.;protocols=HORNETQ,STOMP;useEpoll=true</acceptor>
<!-- MQTT Acceptor -->
<acceptor name="mqtt">tcp://0.0.0.0:4883?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=MQTT;useEpoll=true</acceptor>
</acceptors>
<security-settings>
<security-setting match="#">
<permission type="createNonDurableQueue" roles="amq"/>
<permission type="deleteNonDurableQueue" roles="amq"/>
<permission type="createDurableQueue" roles="amq"/>
<permission type="deleteDurableQueue" roles="amq"/>
<permission type="createAddress" roles="amq"/>
<permission type="deleteAddress" roles="amq"/>
<permission type="consume" roles="amq"/>
<permission type="browse" roles="amq"/>
<permission type="send" roles="amq"/>
<!-- we need this otherwise ./artemis data imp wouldn't work -->
<permission type="manage" roles="amq"/>
</security-setting>
</security-settings>
Broker2:
<?xml version='1.0'?>
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xi="http://www.w3.org/2001/XInclude"
xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xmlns="urn:activemq:core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq:core ">
<name>10.1.1.131</name>
<persistence-enabled>true</persistence-enabled>
<!-- this could be ASYNCIO, MAPPED, NIO
ASYNCIO: Linux Libaio
MAPPED: mmap files
NIO: Plain Java Files
-->
<journal-type>ASYNCIO</journal-type>
<paging-directory>data/paging</paging-directory>
<bindings-directory>data/bindings</bindings-directory>
<journal-directory>data/journal</journal-directory>
<large-messages-directory>data/large-messages</large-messages-directory>
<journal-datasync>true</journal-datasync>
<journal-min-files>2</journal-min-files>
<journal-pool-files>10</journal-pool-files>
<journal-device-block-size>4096</journal-device-block-size>
<journal-file-size>10M</journal-file-size>
<journal-buffer-timeout>23940000</journal-buffer-timeout>
<journal-max-io>1</journal-max-io>
<!-- how often we are looking for how many bytes are being used on the disk in ms -->
<disk-scan-period>5000</disk-scan-period>
<!-- once the disk hits this limit the system will block, or close the connection in certain protocols
that won't support flow control. -->
<max-disk-usage>90</max-disk-usage>
<!-- should the broker detect dead locks and other issues -->
<critical-analyzer>true</critical-analyzer>
<critical-analyzer-timeout>120000</critical-analyzer-timeout>
<critical-analyzer-check-period>60000</critical-analyzer-check-period>
<critical-analyzer-policy>HALT</critical-analyzer-policy>
<!-- Clustering configuration -->
<connectors>
<connector name="netty-connector">tcp://localhost:61616</connector>
<!-- connector to the server1 -->
<connector name="server1-connector">tcp://10.1.1.130:61616</connector>
</connectors>
<ha-policy>
<replication>
<colocated>
<backup-request-retries>-1</backup-request-retries>
<backup-request-retry-interval>2000</backup-request-retry-interval>
<excludes>
<connector-ref>server1-connector</connector-ref>
<connector-ref>netty-connector</connector-ref>
</excludes>
<max-backups>1</max-backups>
<request-backup>true</request-backup>
<master>
</master>
<slave>
</slave>
</colocated>
</replication>
</ha-policy>
<cluster-user>ACTIVEMQ.CLUSTER.ADMIN.USER</cluster-user>
<cluster-password>123456</cluster-password>
<cluster-connections>
<cluster-connection name="my-cluster">
<connector-ref>netty-connector</connector-ref>
<retry-interval>500</retry-interval>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>STRICT</message-load-balancing>
<max-hops>1</max-hops>
<static-connectors>
<connector-ref>server1-connector</connector-ref>
</static-connectors>
</cluster-connection>
</cluster-connections>
<acceptors>
<!-- Acceptor for every supported protocol -->
<acceptor name="artemis">tcp://0.0.0.0:61616?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=CORE,AMQP,STOMP,HORNETQ,MQTT,OPENWIRE;useEpoll=true;amqpCredits=1000;amqpLowCredits=300</acceptor>
<!-- AMQP Acceptor. Listens on default AMQP port for AMQP traffic.-->
<acceptor name="amqp">tcp://0.0.0.0:5672?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=AMQP;useEpoll=true;amqpCredits=1000;amqpLowCredits=300</acceptor>
<!-- STOMP Acceptor. -->
<acceptor name="stomp">tcp://0.0.0.0:61613?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=STOMP;useEpoll=true</acceptor>
<!-- HornetQ Compatibility Acceptor. Enables HornetQ Core and STOMP for legacy HornetQ clients. -->
<acceptor name="hornetq">tcp://0.0.0.0:5445?anycastPrefix=jms.queue.;multicastPrefix=jms.topic.;protocols=HORNETQ,STOMP;useEpoll=true</acceptor>
<!-- MQTT Acceptor -->
<acceptor name="mqtt">tcp://0.0.0.0:4883?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=MQTT;useEpoll=true</acceptor>
</acceptors>
<security-settings>
<security-setting match="#">
<permission type="createNonDurableQueue" roles="amq"/>
<permission type="deleteNonDurableQueue" roles="amq"/>
<permission type="createDurableQueue" roles="amq"/>
<permission type="deleteDurableQueue" roles="amq"/>
<permission type="createAddress" roles="amq"/>
<permission type="deleteAddress" roles="amq"/>
<permission type="consume" roles="amq"/>
<permission type="browse" roles="amq"/>
<permission type="send" roles="amq"/>
<!-- we need this otherwise ./artemis data imp wouldn't work -->
<permission type="manage" roles="amq"/>
</security-setting>
</security-settings>
Here is the Spring Boot Java code that sends the message:
@Configuration
public class ArtemisProducerConfig extends BaseObject {
@Value("${artemis.broker-url}")
private String brokerUrl;
@Bean
public ActiveMQConnectionFactory senderActiveMQConnectionFactory() {
return new ActiveMQConnectionFactory(brokerUrl);
}
@Bean
public CachingConnectionFactory cachingConnectionFactory() {
return new CachingConnectionFactory(senderActiveMQConnectionFactory());
}
@Bean
public JmsTemplate jmsTemplate() {
JmsTemplate template = new JmsTemplate(cachingConnectionFactory());
template.setExplicitQosEnabled(true);
template.setDeliveryPersistent(true);
return template;
}
}
jmsTemplate.convertAndSend("test.address::test.queue", inputData.getData(), new MessagePostProcessor() {
@Override
public Message postProcessMessage(Message message) throws JMSException {
// TODO Auto-generated method stub
message.setJMSCorrelationID(inputData.getCorrID());
return message;
}
});
The message is sent to Broker1 successfully; I can see it in the console at http://10.1.1.130:8161.
But no messages show up on Broker2. My understanding is that the messages must be backed up to Broker2 for HA.
Can someone help me with an example of configuring Artemis for colocated replication over the network?
Thank you!

ActiveMQ Artemis uses an active/passive scheme to achieve high availability. In the replicated use-case an active ("live") broker has a passive ("slave") broker to which it replicates messages. In a replicated & colocated configuration each JVM actually hosts 2 brokers (i.e. the two brokers are colocated in the same JVM). One broker is live and the other serves as a backup for another broker in the cluster. You won't "see" the messages on the slave until the live broker fails, at which point the slave will activate and become the master.
To confirm that replication is happening as expected you can check the log files on the master and the slave. The slave will have something like this first:
INFO [org.apache.activemq.artemis.core.server] AMQ221109: Apache ActiveMQ Artemis Backup Server version X.X.X [null] started, waiting live to fail before it gets active
Then the live will have something like this:
INFO [org.apache.activemq.artemis.core.server] AMQ221025: Replication: sending NIOSequentialFile /path/to/data/journal/activemq-data-2.amq (size=10,485,760) to replica.
INFO [org.apache.activemq.artemis.core.server] AMQ221025: Replication: sending NIOSequentialFile /path/to/data/bindings/activemq-bindings-3.bindings (size=1,048,576) to replica.
INFO [org.apache.activemq.artemis.core.server] AMQ221025: Replication: sending NIOSequentialFile /path/to/data/bindings/activemq-bindings-2.bindings (size=1,048,576) to replica.
INFO [org.apache.activemq.artemis.core.server] AMQ221024: Backup server ActiveMQServerImpl::serverUUID=8d7477d0-1518-11ea-abd1-a0afbd82eaba is synchronized with live-server.
Then finally the slave will have:
INFO [org.apache.activemq.artemis.core.server] AMQ221031: backup announced
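For clients to benefit from the backup, point them at both live brokers with HA enabled so they can reconnect to whichever colocated backup activates. Here is a minimal sketch using the same Core JMS client as in the question (the ha and reconnectAttempts URL parameters are standard Artemis connection options; the hosts come from your configuration, and everything else is an assumption about your environment):
import javax.jms.Connection;

import org.apache.activemq.artemis.jms.client.ActiveMQConnectionFactory;

public class FailoverClientSketch {
    public static void main(String[] args) throws Exception {
        // List both live brokers; ha=true makes the client track the cluster topology
        // so it can fail over to the colocated backup, and reconnectAttempts=-1 retries forever.
        ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(
                "(tcp://10.1.1.130:61616,tcp://10.1.1.131:61616)?ha=true&reconnectAttempts=-1");
        try (Connection connection = factory.createConnection()) {
            connection.start();
            // ...create sessions, producers and consumers as usual
        }
    }
}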

Related

Apache Ignite Structured Logging

I am looking to enable structured logging for Ignite.
Ignite runs inside a Docker container.
I enabled the log4j2 module and added a log4j2 configuration file that tries to use <JsonTemplateLayout.../> as described here, but in the logs I get the message:
Console contains an invalid element or attribute "JsonTemplateLayout"
This is probably caused by the log4j-layout-template-json dependency not being available inside Ignite. Is there a way to add the dependency to Ignite, or is there another option for getting structured logging to work?
Ignite configuration:
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:util="http://www.springframework.org/schema/util"
xsi:schemaLocation="
http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/util
http://www.springframework.org/schema/util/spring-util.xsd">
<bean class="org.apache.ignite.configuration.IgniteConfiguration">
...
<property name="gridLogger">
<bean class="org.apache.ignite.logger.log4j2.Log4J2Logger">
<constructor-arg type="java.lang.String" value="config/ignite-log4j2-custom.xml"/>
</bean>
</property>
</bean>
</beans>
log4j2 configuration:
<?xml version="1.0" encoding="UTF-8"?>
<Configuration monitorInterval="60" status="debug">
<Appenders>
<Console name="CONSOLE" target="SYSTEM_OUT">
<!-- <PatternLayout pattern="[%d{ISO8601}][%-5p][%t][%c{1}]%notEmpty{[%markerSimpleName]} %m%n"/> -->
<ThresholdFilter level="ERROR" onMatch="DENY" onMismatch="ACCEPT"/>
<JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json"/>
</Console>
<Console name="CONSOLE_ERR" target="SYSTEM_ERR">
<!-- <PatternLayout pattern="[%d{ISO8601}][%-5p][%t][%c{1}]%notEmpty{[%markerSimpleName]} %m%n"/> -->
<JsonTemplateLayout eventTemplateUri="classpath:EcsLayout.json"/>
</Console>
<File name="CONSISTENCY" fileName="${sys:IGNITE_HOME}/work/log/consistency.log">
<PatternLayout>
<Pattern>"[%d{ISO8601}][%-5p][%t][%c{1}] %m%n"</Pattern>
</PatternLayout>
</File>
<Routing name="FILE">
<Routes pattern="$${sys:nodeId}">
<Route>
<RollingFile name="Rolling-${sys:nodeId}" fileName="${sys:IGNITE_HOME}/work/log/${sys:appId}-${sys:nodeId}.log"
filePattern="${sys:IGNITE_HOME}/work/log/${sys:appId}-${sys:nodeId}-%i-%d{yyyy-MM-dd}.log.gz">
<PatternLayout pattern="[%d{ISO8601}][%-5p][%t][%c{1}]%notEmpty{[%markerSimpleName]} %m%n"/>
<Policies>
<TimeBasedTriggeringPolicy interval="6" modulate="true" />
<SizeBasedTriggeringPolicy size="10 MB" />
</Policies>
</RollingFile>
</Route>
</Routes>
</Routing>
</Appenders>
<Loggers>
<!-- <Logger name="org.apache.ignite" level="INFO"/> -->
<!--
Uncomment to disable courtesy notices, such as SPI configuration
consistency warnings.
-->
<!--
<Logger name="org.apache.ignite.CourtesyConfigNotice" level=OFF/>
-->
<Logger name="org.springframework" level="WARN"/>
<Logger name="org.eclipse.jetty" level="WARN"/>
<Logger name="org.apache.ignite.internal.visor.consistency" additivity="false" level="INFO">
<AppenderRef ref="CONSISTENCY"/>
</Logger>
<!--
Avoid warnings about failed bind attempt when multiple nodes running on the same host.
-->
<Logger name="org.eclipse.jetty.util.log" level="ERROR"/>
<Logger name="org.eclipse.jetty.util.component" level="ERROR"/>
<Logger name="com.amazonaws" level="WARN"/>
<Root level="INFO">
<!-- Uncomment to enable logging to console. -->
<AppenderRef ref="CONSOLE" level="INFO"/>
<AppenderRef ref="CONSOLE_ERR" level="ERROR"/>
<AppenderRef ref="FILE" level="DEBUG"/>
</Root>
</Loggers>
</Configuration>
When adding the JAR to libs (as suggested by Stanislav below) I get a step further, but I also get an error (I'm not a Java person, so any hint is highly appreciated):
main ERROR An exception occurred processing Appender CONSOLE org.apache.logging.log4j.core.appender.AppenderLoggingException: java.lang.IllegalAccessError: class org.apache.logging.log4j.layout.template.json.JsonTemplateLayout$StringBuilderEncoder tried to access method 'void org.apache.logging.log4j.core.layout.TextEncoderHelper.encodeText(java.nio.charset.CharsetEncoder, java.nio.CharBuffer, java.nio.ByteBuffer, java.lang.StringBuilder, org.apache.logging.log4j.core.layout.ByteBufferDestination)' (org.apache.logging.log4j.layout.template.json.JsonTemplateLayout$StringBuilderEncoder and org.apache.logging.log4j.core.layout.TextEncoderHelper are in unnamed module of loader 'app')
at org.apache.logging.log4j.core.config.AppenderControl.tryCallAppender(AppenderControl.java:165)
at org.apache.logging.log4j.core.config.AppenderControl.callAppender0(AppenderControl.java:134)
at org.apache.logging.log4j.core.config.AppenderControl.callAppenderPreventRecursion(AppenderControl.java:125)
at org.apache.logging.log4j.core.config.AppenderControl.callAppender(AppenderControl.java:89)
at org.apache.logging.log4j.core.config.LoggerConfig.callAppenders(LoggerConfig.java:542)
at org.apache.logging.log4j.core.config.LoggerConfig.processLogEvent(LoggerConfig.java:500)
at org.apache.logging.log4j.core.config.LoggerConfig.log(LoggerConfig.java:483)
at org.apache.logging.log4j.core.config.LoggerConfig.log(LoggerConfig.java:417)
at org.apache.logging.log4j.core.config.AwaitCompletionReliabilityStrategy.log(AwaitCompletionReliabilityStrategy.java:82)
at org.apache.logging.log4j.core.Logger.log(Logger.java:161)
at org.apache.logging.log4j.spi.AbstractLogger.tryLogMessage(AbstractLogger.java:2205)
at org.apache.logging.log4j.spi.AbstractLogger.logMessageTrackRecursion(AbstractLogger.java:2159)
at org.apache.logging.log4j.spi.AbstractLogger.logMessageSafely(AbstractLogger.java:2142)
at org.apache.logging.log4j.spi.AbstractLogger.logMessage(AbstractLogger.java:2017)
at org.apache.logging.log4j.spi.AbstractLogger.logIfEnabled(AbstractLogger.java:1983)
at org.apache.logging.log4j.spi.AbstractLogger.info(AbstractLogger.java:1275)
at org.apache.ignite.logger.log4j2.Log4J2Logger.info(Log4J2Logger.java:472)
at org.apache.ignite.logger.log4j2.Log4J2Logger.info(Log4J2Logger.java:464)
at org.apache.ignite.internal.GridLoggerProxy.info(GridLoggerProxy.java:137)
at org.apache.ignite.internal.plugin.IgniteLogInfoProviderImpl.ackConfiguration(IgniteLogInfoProviderImpl.java:222)
at org.apache.ignite.internal.plugin.IgniteLogInfoProviderImpl.ackKernalInited(IgniteLogInfoProviderImpl.java:98)
at org.apache.ignite.internal.IgniteKernal.start(IgniteKernal.java:902)
at org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance.start0(IgnitionEx.java:1799)
at org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance.start(IgnitionEx.java:1721)
at org.apache.ignite.internal.IgnitionEx.start0(IgnitionEx.java:1160)
at org.apache.ignite.internal.IgnitionEx.startConfigurations(IgnitionEx.java:1054)
at org.apache.ignite.internal.IgnitionEx.start(IgnitionEx.java:940)
at org.apache.ignite.internal.IgnitionEx.start(IgnitionEx.java:839)
at org.apache.ignite.internal.IgnitionEx.start(IgnitionEx.java:709)
at org.apache.ignite.internal.IgnitionEx.start(IgnitionEx.java:678)
at org.apache.ignite.Ignition.start(Ignition.java:353)
at org.apache.ignite.startup.cmdline.CommandLineStartup.main(CommandLineStartup.java:365)
Caused by: java.lang.IllegalAccessError: class org.apache.logging.log4j.layout.template.json.JsonTemplateLayout$StringBuilderEncoder tried to access method 'void org.apache.logging.log4j.core.layout.TextEncoderHelper.encodeText(java.nio.charset.CharsetEncoder, java.nio.CharBuffer, java.nio.ByteBuffer, java.lang.StringBuilder, org.apache.logging.log4j.core.layout.ByteBufferDestination)' (org.apache.logging.log4j.layout.template.json.JsonTemplateLayout$StringBuilderEncoder and org.apache.logging.log4j.core.layout.TextEncoderHelper are in unnamed module of loader 'app')
at org.apache.logging.log4j.layout.template.json.JsonTemplateLayout$StringBuilderEncoder.encode(JsonTemplateLayout.java:241)
at org.apache.logging.log4j.layout.template.json.JsonTemplateLayout$StringBuilderEncoder.encode(JsonTemplateLayout.java:216)
at org.apache.logging.log4j.layout.template.json.JsonTemplateLayout.encode(JsonTemplateLayout.java:304)
at org.apache.logging.log4j.layout.template.json.JsonTemplateLayout.encode(JsonTemplateLayout.java:58)
at org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender.directEncodeEvent(AbstractOutputStreamAppender.java:197)
at org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender.tryAppend(AbstractOutputStreamAppender.java:190)
at org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender.append(AbstractOutputStreamAppender.java:181)
at org.apache.logging.log4j.core.config.AppenderControl.tryCallAppender(AppenderControl.java:161)
... 31 more
Solution
As Stanislav Lukyanov suggested (see the accepted answer), the solution was to simply download the JAR and place it under $IGNITE_HOME/libs. The error mentioned above was caused by a version mismatch. Having the following JARs with matching versions made it work:
log4j-api-2.17.1.jar (default provided by ignite distribution)
log4j-core-2.17.1.jar (default provided by ignite distribution)
ignite-log4j2-2.13.0.jar (default provided by ignite distribution)
log4j-layout-template-json-2.17.1.jar (added, did not work with version 2.18.x)
If you run Ignite using Maven, you'll need to add the required dependency to your application POM, as described in the documentation:
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-layout-template-json</artifactId>
<version>2.18.0</version>
</dependency>
If you run Ignite using a ZIP distribution, you'll need to download the dependency as a JAR, e.g. from here, and add it to $IGNITE_HOME/libs.

ActiveMQ Artemis not starting without SSL enabled configuration

I'm using ActiveMQ Artemis 2.18.0. First I configured the setup with SSL and Artemis started without errors. Then I wanted to test my setup without SSL, so I removed all SSL-related settings from broker.xml and bootstrap.xml, and now when I try to run Artemis I get this:
2021-10-13 07:34:26,047 INFO [org.apache.activemq.artemis.core.server] AMQ221001: Apache ActiveMQ Artemis Message Broker version 2.18.0 [amq1, nodeID=bee15e5b-2bf7-11ec-887f-0800277c53f8]
2021-10-13 07:34:26,263 INFO [org.apache.activemq.hawtio.branding.PluginContextListener] Initialized activemq-branding plugin
2021-10-13 07:34:26,297 INFO [org.apache.activemq.hawtio.plugin.PluginContextListener] Initialized artemis-plugin plugin
2021-10-13 07:34:26,548 INFO [io.hawt.HawtioContextListener] Initialising hawtio services
2021-10-13 07:34:26,571 INFO [io.hawt.system.ConfigManager] Configuration will be discovered via system properties
2021-10-13 07:34:26,573 INFO [io.hawt.jmx.JmxTreeWatcher] Welcome to Hawtio 2.13.5
2021-10-13 07:34:26,580 INFO [io.hawt.web.auth.AuthenticationConfiguration] Starting hawtio authentication filter, JAAS realm: "activemq" authorized role(s): "amq" role principal classes: "org.apache.activemq.artemis.spi.core.security.jaas.RolePrincipal"
2021-10-13 07:34:26,595 INFO [io.hawt.web.proxy.ProxyServlet] Proxy servlet is disabled
2021-10-13 07:34:26,600 INFO [io.hawt.web.servlets.JolokiaConfiguredAgentServlet] Jolokia overridden property: [key=policyLocation, value=file:/home/vagrant/artemis-broker/etc/jolokia-access.xml]
java.lang.IllegalStateException: /home/vagrant/artemis-broker/etc/keystore.jks is not a valid keystore
at org.eclipse.jetty.util.security.CertificateUtils.getKeyStore(CertificateUtils.java:50)
at org.eclipse.jetty.util.ssl.SslContextFactory.loadKeyStore(SslContextFactory.java:1203)
at org.eclipse.jetty.util.ssl.SslContextFactory.load(SslContextFactory.java:322)
at org.eclipse.jetty.util.ssl.SslContextFactory.doStart(SslContextFactory.java:244)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:73)
at org.eclipse.jetty.util.component.ContainerLifeCycle.start(ContainerLifeCycle.java:169)
at org.eclipse.jetty.util.component.ContainerLifeCycle.doStart(ContainerLifeCycle.java:117)
at org.eclipse.jetty.server.SslConnectionFactory.doStart(SslConnectionFactory.java:97)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:73)
at org.eclipse.jetty.util.component.ContainerLifeCycle.start(ContainerLifeCycle.java:169)
at org.eclipse.jetty.util.component.ContainerLifeCycle.doStart(ContainerLifeCycle.java:117)
at org.eclipse.jetty.server.AbstractConnector.doStart(AbstractConnector.java:321)
at org.eclipse.jetty.server.AbstractNetworkConnector.doStart(AbstractNetworkConnector.java:81)
at org.eclipse.jetty.server.ServerConnector.doStart(ServerConnector.java:234)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:73)
at org.eclipse.jetty.server.Server.doStart(Server.java:401)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:73)
at org.apache.activemq.artemis.component.WebServerComponent.start(WebServerComponent.java:263)
at org.apache.activemq.artemis.core.server.impl.ActiveMQServerImpl.addExternalComponent(ActiveMQServerImpl.java:908)
at org.apache.activemq.artemis.cli.commands.Run.execute(Run.java:126)
at org.apache.activemq.artemis.cli.Artemis.internalExecute(Artemis.java:155)
at org.apache.activemq.artemis.cli.Artemis.execute(Artemis.java:103)
at org.apache.activemq.artemis.cli.Artemis.execute(Artemis.java:130)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at org.apache.activemq.artemis.boot.Artemis.execute(Artemis.java:134)
at org.apache.activemq.artemis.boot.Artemis.main(Artemis.java:50)
2021-10-13 07:34:26,847 INFO [io.hawt.web.auth.AuthenticationFilter] Destroying hawtio authentication filter
2021-10-13 07:34:26,848 INFO [io.hawt.HawtioContextListener] Destroying hawtio services
2021-10-13 07:34:26,875 INFO [org.apache.activemq.hawtio.plugin.PluginContextListener] Destroyed artemis-plugin plugin
2021-10-13 07:34:26,878 INFO [org.apache.activemq.hawtio.branding.PluginContextListener] Destroyed activemq-branding plugin
2021-10-13 07:34:26,902 INFO [org.apache.activemq.artemis.core.server] AMQ221002: Apache ActiveMQ Artemis Message Broker version 2.18.0 [bee15e5b-2bf7-11ec-887f-0800277c53f8] stopped, uptime 11.619 seconds
I'm not sure which Jolokia property is overridden. Did I forget to do something else?
bootstrap.xml:
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<broker xmlns="http://activemq.org/schema">
<jaas-security domain="activemq"/>
<server configuration="file:/home/vagrant/artemis-broker/etc//broker.xml"/>
<web bind="https://0.0.0.0:8161" path="web">
<app url="activemq-branding" war="activemq-branding.war"/>
<app url="artemis-plugin" war="artemis-plugin.war"/>
<app url="console" war="console.war"/>
</web>
</broker>
broker.xml:
<?xml version='1.0'?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<configuration xmlns="urn:activemq"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xi="http://www.w3.org/2001/XInclude"
xsi:schemaLocation="urn:activemq /schema/artemis-configuration.xsd">
<core xmlns="urn:activemq:core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq:core ">
<name>amq1</name>
<persistence-enabled>true</persistence-enabled>
<!-- this could be ASYNCIO, MAPPED, NIO
ASYNCIO: Linux Libaio
MAPPED: mmap files
NIO: Plain Java Files
-->
<journal-type>ASYNCIO</journal-type>
<paging-directory>data/paging</paging-directory>
<bindings-directory>data/bindings</bindings-directory>
<journal-directory>data/journal</journal-directory>
<large-messages-directory>data/large-messages</large-messages-directory>
<journal-datasync>true</journal-datasync>
<journal-min-files>2</journal-min-files>
<journal-pool-files>10</journal-pool-files>
<journal-device-block-size>4096</journal-device-block-size>
<journal-file-size>10M</journal-file-size>
<!--
This value was determined through a calculation.
Your system could perform 31.25 writes per millisecond
on the current journal configuration.
That translates as a sync write every 32000 nanoseconds.
Note: If you specify 0 the system will perform writes directly to the disk.
We recommend this to be 0 if you are using journalType=MAPPED and journal-datasync=false.
-->
<journal-buffer-timeout>28000</journal-buffer-timeout>
<!--
When using ASYNCIO, this will determine the writing queue depth for libaio.
-->
<journal-max-io>4096</journal-max-io>
<max-disk-usage>100</max-disk-usage>
<!-- should the broker detect dead locks and other issues -->
<critical-analyzer>true</critical-analyzer>
<critical-analyzer-timeout>150000</critical-analyzer-timeout>
<critical-analyzer-check-period>60000</critical-analyzer-check-period>
<critical-analyzer-policy>HALT</critical-analyzer-policy>
<page-sync-timeout>1628000</page-sync-timeout>
<global-max-size>204Mb</global-max-size>
<connectors>
<connector name="amq1">tcp://amq1:61616</connector>
<connector name="amq2">tcp://amq2:61616</connector>
<connector name="amq3">tcp://amq3:61616</connector>
<connector name="amq4">tcp://amq4:61616</connector>
<connector name="amq5">tcp://amq5:61616</connector>
<connector name="amq6">tcp://amq6:61616</connector>
</connectors>
<acceptors>
<acceptor name="artemis">tcp://0.0.0.0:61616?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;amqpMinLargeMessageSize=102400;protocols=CORE,AMQP,STOMP,HORNETQ,MQTT,OPENWIRE;useEpoll=true;amqpCredits=1000;amqpLowCredits=300;amqpDuplicateDetection=true</acceptor>
<acceptor name="amqp">tcp://0.0.0.0:5672?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=AMQP;useEpoll=true;amqpCredits=1000;amqpLowCredits=300;amqpMinLargeMessageSize=102400;amqpDuplicateDetection=true</acceptor>
<acceptor name="stomp">tcp://0.0.0.0:61613?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=STOMP;useEpoll=true</acceptor>
<acceptor name="hornetq">tcp://0.0.0.0:5445?anycastPrefix=jms.queue.;multicastPrefix=jms.topic.;protocols=HORNETQ,STOMP;useEpoll=true</acceptor>
<acceptor name="mqtt">tcp://0.0.0.0:1883?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=MQTT;useEpoll=true</acceptor>
</acceptors>
<broadcast-groups>
<broadcast-group name="artemis-broadcast-group">
<group-address>231.7.7.7</group-address>
<group-port>9876</group-port>
<broadcast-period>2000</broadcast-period>
<connector-ref>amq1</connector-ref>
</broadcast-group>
</broadcast-groups>
<discovery-groups>
<discovery-group name="artemis-discovery-group">
<group-address>231.7.7.7</group-address>
<group-port>9876</group-port>
<refresh-timeout>10000</refresh-timeout>
</discovery-group>
</discovery-groups>
<cluster-user>admin</cluster-user>
<cluster-password>admin</cluster-password>
<cluster-connections>
<cluster-connection name="artemis-cluster">
<connector-ref>amq1</connector-ref>
<retry-interval>1000</retry-interval>
<retry-interval-multiplier>3</retry-interval-multiplier>
<max-retry-interval>5000</max-retry-interval>
<initial-connect-attempts>-1</initial-connect-attempts>
<reconnect-attempts>-1</reconnect-attempts>
<use-duplicate-detection>true</use-duplicate-detection>
<message-load-balancing>STRICT</message-load-balancing>
<max-hops>1</max-hops>
<discovery-group-ref discovery-group-name="artemis-discovery-group"/>
</cluster-connection>
</cluster-connections>
<!-- Other config -->
<ha-policy>
<replication>
<master>
<group-name>artemis-group-1</group-name>
<quorum-vote-wait>12</quorum-vote-wait>
<vote-on-replication-failure>true</vote-on-replication-failure>
<!--for auto failback -->
<check-for-live-server>true</check-for-live-server>
</master>
</replication>
</ha-policy>
<security-settings>
<security-setting match="#">
<permission type="createNonDurableQueue" roles="amq"/>
<permission type="deleteNonDurableQueue" roles="amq"/>
<permission type="createDurableQueue" roles="amq"/>
<permission type="deleteDurableQueue" roles="amq"/>
<permission type="createAddress" roles="amq"/>
<permission type="deleteAddress" roles="amq"/>
<permission type="consume" roles="amq"/>
<permission type="browse" roles="amq"/>
<permission type="send" roles="amq"/>
<!-- we need this otherwise ./artemis data imp wouldn't work -->
<permission type="manage" roles="amq"/>
</security-setting>
</security-settings>
<addresses>
<address name="exampleQueue">
<anycast>
<queue name="exampleQueue"/>
</anycast>
</address>
<address name="DLQ">
</address>
<address name="ExpiryQueue">
<anycast>
<queue name="ExpiryQueue" />
</anycast>
</address>
</addresses>
<address-settings>
<!-- if you define auto-create on certain queues, management has to be auto-create -->
<address-setting match="activemq.management#">
<dead-letter-address>DLQ</dead-letter-address>
<expiry-address>ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
<!-- with -1 only the global-max-size is in use for limiting -->
<max-size-bytes>-1</max-size-bytes>
<message-counter-history-day-limit>10</message-counter-history-day-limit>
<address-full-policy>PAGE</address-full-policy>
<auto-create-queues>true</auto-create-queues>
<auto-create-addresses>true</auto-create-addresses>
<auto-create-jms-queues>true</auto-create-jms-queues>
<auto-create-jms-topics>true</auto-create-jms-topics>
</address-setting>
<!--default for catch all-->
<address-setting match="#">
<dead-letter-address>DLQ</dead-letter-address>
<expiry-address>ExpiryQueue</expiry-address>
<redelivery-delay>0</redelivery-delay>
<auto-create-dead-letter-resources>true</auto-create-dead-letter-resources>
<!-- with -1 only the global-max-size is in use for limiting -->
<max-size-bytes>-1</max-size-bytes>
<message-counter-history-day-limit>10</message-counter-history-day-limit>
<address-full-policy>PAGE</address-full-policy>
<auto-create-queues>true</auto-create-queues>
<auto-create-addresses>true</auto-create-addresses>
<auto-create-jms-queues>true</auto-create-jms-queues>
<auto-create-jms-topics>true</auto-create-jms-topics>
</address-setting>
<address-setting match="exampleQueue">
<dead-letter-address>DLQ</dead-letter-address>
<redelivery-delay>1000</redelivery-delay>
<max-delivery-attempts>3</max-delivery-attempts>
<max-size-bytes>-1</max-size-bytes>
<page-size-bytes>1048576</page-size-bytes>
<message-counter-history-day-limit>10</message-counter-history-day-limit>
<address-full-policy>PAGE</address-full-policy>
</address-setting>
</address-settings>
<!-- Uncomment the following if you want to use the Standard LoggingActiveMQServerPlugin pluging to log in events
<broker-plugins>
<broker-plugin class-name="org.apache.activemq.artemis.core.server.plugin.impl.LoggingActiveMQServerPlugin">
<property key="LOG_ALL_EVENTS" value="true"/>
<property key="LOG_CONNECTION_EVENTS" value="true"/>
<property key="LOG_SESSION_EVENTS" value="true"/>
<property key="LOG_CONSUMER_EVENTS" value="true"/>
<property key="LOG_DELIVERING_EVENTS" value="true"/>
<property key="LOG_SENDING_EVENTS" value="true"/>
<property key="LOG_INTERNAL_EVENTS" value="true"/>
</broker-plugin>
</broker-plugins>
-->
</core>
</configuration>
ActiveMQ Artemis is failing because the bind attribute of the web element is using the HTTPS protocol:
<web bind="https://0.0.0.0:8161" path="web">
To fix this issue, the bind attribute should use the HTTP protocol:
<web bind="http://0.0.0.0:8161" path="web">

SSL implemented on Artemis, but clients with invalid trust stores and user credentials are able to connect to the broker

I'm using JMS on a Spring boot client to connect to an ActiveMQ Artemis broker over SSL. The client is able to connect regardless of the validity of the certificates in the truststore and even if invalid credentials are used. How do I ensure that the broker is filtering clients out based on the configured parameters?
The acceptors in broker.xml are defined as shown below. The SSL acceptor uses port 61617.
<acceptors>
<!-- Acceptor for every supported protocol -->
<acceptor name="artemis">tcp://0.0.0.0:61616?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;amqpMinLargeMessageSize=102400;protocols=CORE,AMQP,STOMP,HORNETQ,MQTT,OPENWIRE;useEpoll=true;amqpCredits=1000;amqpLowCredits=300;amqpDuplicateDetection=true;supportAdvisory=false;suppressInternalManagementObjects=false</acceptor>
<!-- AMQP Acceptor. Listens on default AMQP port for AMQP traffic.-->
<acceptor name="amqp">tcp://0.0.0.0:5672?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=AMQP;useEpoll=true;amqpCredits=1000;amqpLowCredits=300;amqpMinLargeMessageSize=102400;amqpDuplicateDetection=true</acceptor>
<!-- STOMP Acceptor. -->
<acceptor name="stomp">tcp://0.0.0.0:61613?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=STOMP;useEpoll=true</acceptor>
<!-- HornetQ Compatibility Acceptor. Enables HornetQ Core and STOMP for legacy HornetQ clients. -->
<acceptor name="hornetq">tcp://0.0.0.0:5445?anycastPrefix=jms.queue.;multicastPrefix=jms.topic.;protocols=HORNETQ,STOMP;useEpoll=true</acceptor>
<!-- MQTT Acceptor -->
<acceptor name="mqtt">tcp://0.0.0.0:1883?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;protocols=MQTT;useEpoll=false</acceptor>
<!-- SSL Acceptor -->
<acceptor name="netty-ssl-acceptor">tcp://0.0.0.0:61617?tcpSendBufferSize=1048576;tcpReceiveBufferSize=1048576;sslEnabled=true;keyStorePath=E:/apache-artemis-2.18.0/bin/localBroker/etc/sprink.jks;keyStorePassword=changeit;trustStorePath=E:/apache-artemis-2.18.0/bin/localBroker/etc/sprinktrust.ts;trustStorePassword=changeit;needClientAuth=true;protocols=CORE,AMQP,STOMP,HORNETQ,MQTT,OPENWIRE</acceptor>
</acceptors>
The connection factory, listener, and JmsTemplate are configured on the Spring Boot client as shown below:
import org.apache.activemq.artemis.jms.client.ActiveMQConnectionFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jms.annotation.EnableJms;
import org.springframework.jms.config.DefaultJmsListenerContainerFactory;
import org.springframework.jms.core.JmsTemplate;
import javax.jms.DeliveryMode;
import javax.jms.JMSException;
@Configuration
@EnableJms
public class MQTTConfig {
@Value("${activemq.broker-url}")
private String brokerUrl;
@Value("${activemq.ssl-url}")
private String sslUrl;
@Value("${JMS_BROKER_TRUSTSTORE}")
private String pathToTrustStore;
@Value("${JMS_BROKER_KEYSTORE}")
private String pathToKeystore;
@Value("${JMS_BROKER_TRUSTSTORE_PASSWORD}")
private String truststorePassword;
@Value("${JMS_BROKER_KEYSTORE_PASSWORD}")
private String keystorePassword;
/**
* Initialise the connection factory that will be used
*/
@Bean
public ActiveMQConnectionFactory artemisSSLConnectionFactory() {
ActiveMQConnectionFactory artemisConnectionFactory = new ActiveMQConnectionFactory("tcp://localhost:61617?&" + "sslEnabled=true&" +
"trustStorePath=" + pathToTrustStore + "&trustStorePassword=changeit&needClientAuth=true");
artemisConnectionFactory.setUser("user");
artemisConnectionFactory.setPassword("password");
return artemisConnectionFactory;
}
/**
* Initialise {@link JmsTemplate} as required
*/
@Bean
public JmsTemplate jmsTemplate() throws JMSException {
JmsTemplate jmsTemplate = new JmsTemplate();
jmsTemplate.setConnectionFactory(artemisSSLConnectionFactory());
jmsTemplate.setExplicitQosEnabled(true);
//setting PubSubDomain to true configures JmsTemplate to work with topics instead of queues
jmsTemplate.setPubSubDomain(true);
jmsTemplate.setDeliveryMode(DeliveryMode.NON_PERSISTENT);
jmsTemplate.setDeliveryPersistent(true);
return jmsTemplate;
}
/**
* Initialise {@link DefaultJmsListenerContainerFactory} as required
*/
@Bean
public DefaultJmsListenerContainerFactory jmsListenerContainerFactory() throws JMSException {
DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
factory.setConnectionFactory(artemisSSLConnectionFactory());
//setting PubSubDomain to true configures the DefaultJmsListenerContainerFactory to work with topics instead of queues
factory.setPubSubDomain(true);
return factory;
}
}
The artemis-users.properties file is as shown below
admin = ENC(1024...)
system=manager
user=password
guest=password
The artemis-roles.properties file has the following roles defined:
admins = admin
users=user
Admins and users have been given privileges in the broker.xml as shown below
<security-settings>
<security-setting match="#">
<permission type="createNonDurableQueue" roles="admins, users"/>
<permission type="deleteNonDurableQueue" roles="admins, users"/>
<permission type="createDurableQueue" roles="admins, users"/>
<permission type="deleteDurableQueue" roles="admins, users"/>
<permission type="createAddress" roles="admins, users"/>
<permission type="deleteAddress" roles="admins, users"/>
<permission type="consume" roles="admins, users"/>
<permission type="browse" roles="admins, users"/>
<permission type="send" roles="admins, users"/>
<!-- we need this otherwise ./artemis data imp wouldn't work -->
<permission type="manage" roles="admins"/>
</security-setting>
</security-settings>
With the above setup any client with any truststore or username and password is able to connect to the broker on port 61617 and produce and consume messages. What am I missing that is allowing this to happen?
Here's what got things working.
Firstly, the login.config file on Artemis has a GuestLoginModule that, according to this link, is chained to the PropertiesLoginModule; the guest module allows clients without credentials, or even with invalid credentials, to connect to the broker. By default, the GuestLoginModule looks like this:
org.apache.activemq.artemis.spi.core.security.jaas.GuestLoginModule sufficient
debug=false
org.apache.activemq.jaas.guest.user="admin"
org.apache.activemq.jaas.guest.role="admins";
Notice that the guests are treated as admins. My artemis-users.properties file already has a guest user defined (as seen in my original post above), so I created a guests group in the artemis-roles.properties file and assigned this guest user to this group. I then changed the admin mapping in the GuestLoginModule so that the module would reference guests. The GuestLoginModule now looks like this:
org.apache.activemq.artemis.spi.core.security.jaas.GuestLoginModule sufficient
debug=false
org.apache.activemq.jaas.guest.user="guest"
org.apache.activemq.jaas.guest.role="guests";
The security settings in the broker.xml file can be modified to accommodate guest functions as per one's use case; mine doesn't have any use case for a guest user, so guests have no permissions.
Once this was done, I tried connecting to the broker and got the null cert chain exception. I fixed this by including the keystore in the connection factory config.
Because needClientAuth is set to true (dual auth is enabled), Artemis needs the clients to have the relevant keystore built by bundling the key pair derived from the root CA, so my connection factory configuration changed from
@Bean
public ActiveMQConnectionFactory artemisSSLConnectionFactory() {
ActiveMQConnectionFactory artemisConnectionFactory = new ActiveMQConnectionFactory("tcp://localhost:61617?&" + "sslEnabled=true&" +
"trustStorePath=" + pathToTrustStore + "&trustStorePassword=changeit&needClientAuth=true");
artemisConnectionFactory.setUser("user");
artemisConnectionFactory.setPassword("password");
return artemisConnectionFactory;
}
to
@Bean
public ActiveMQConnectionFactory artemisSSLConnectionFactory() {
ActiveMQConnectionFactory artemisConnectionFactory = new ActiveMQConnectionFactory("tcp://localhost:61617?&" + "sslEnabled=true&" +
"trustStorePath=" + pathToTrustStore + "&trustStorePassword=changeit&keyStorePath="+ pathToKeystore +"&keyStorePassword=changeit&needClientAuth=true");
artemisConnectionFactory.setUser("user");
artemisConnectionFactory.setPassword("password");
return artemisConnectionFactory;
}
The only difference here is the addition of the keystore path and password. The connection did not succeed when only the truststore was in the config.
If one-way auth is needed, just omit the needClientAuth parameter in the acceptor, as it defaults to false.
This is what finally worked.
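A quick way to sanity-check the hardened setup is a small client that deliberately presents bad credentials and an untrusted store and verifies that the broker now rejects it. This is only a rough sketch; the store paths and passwords below are placeholders, not values from the original setup:
import javax.jms.Connection;
import javax.jms.JMSSecurityException;

import org.apache.activemq.artemis.jms.client.ActiveMQConnectionFactory;

public class BrokerSecurityCheck {
    public static void main(String[] args) {
        // Placeholder stores that the broker does NOT trust, to prove the handshake/auth is enforced.
        ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(
                "tcp://localhost:61617?sslEnabled=true"
                        + "&trustStorePath=untrusted-trust.ts&trustStorePassword=changeit"
                        + "&keyStorePath=untrusted-key.jks&keyStorePassword=changeit");
        try (Connection ignored = factory.createConnection("wrong-user", "wrong-password")) {
            System.out.println("Unexpected: the broker accepted the connection");
        } catch (JMSSecurityException e) {
            System.out.println("Broker rejected invalid credentials as expected: " + e.getMessage());
        } catch (Exception e) {
            // An untrusted certificate typically fails earlier, during the TLS handshake.
            System.out.println("Connection failed as expected: " + e.getMessage());
        }
    }
}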

How do I connect to ActiveMQ Artemis embedded server?

I have an embedded ActiveMQ Artemis application that I have started using the configuration and code below:
broker.xml
<?xml version='1.0'?>
<configuration
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:activemq"
xsi:schemaLocation="urn:activemq /schema/artemis-server.xsd">
<core xmlns="urn:activemq:core"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:activemq:core ">
<persistence-enabled>false</persistence-enabled>
<paging-directory>target/data/paging</paging-directory>
<bindings-directory>target/data/bindings</bindings-directory>
<journal-directory>target/data/journal</journal-directory>
<large-messages-directory>target/data/large-messages</large-messages-directory>
<acceptors>
<acceptor name="in-vm">vm://0</acceptor>
</acceptors>
<security-settings>
<security-setting match="#">
<permission type="createNonDurableQueue" roles="guest"/>
<permission type="deleteNonDurableQueue" roles="guest"/>
<permission type="createDurableQueue" roles="guest"/>
<permission type="deleteDurableQueue" roles="guest"/>
<permission type="createAddress" roles="guest"/>
<permission type="deleteAddress" roles="guest"/>
<permission type="consume" roles="guest"/>
<permission type="browse" roles="guest"/>
<permission type="send" roles="guest"/>
<!-- we need this otherwise ./artemis data imp wouldn't work -->
<permission type="manage" roles="guest"/>
</security-setting>
</security-settings>
<address-settings>
...
</address-settings>
<addresses>
...
</addresses>
</core>
</configuration>
jndi.properties
java.naming.factory.initial=org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory
connectionFactory.ConnectionFactory=vm://0
Main.java
SecurityConfiguration securityConfig = new SecurityConfiguration();
securityConfig.addUser("guest", "guest");
securityConfig.addRole("guest", "guest");
securityConfig.setDefaultUser("guest");
ActiveMQJAASSecurityManager securityManager = new ActiveMQJAASSecurityManager(InVMLoginModule.class.getName(), securityConfig);
// Step 2. Create and start embedded broker.
server = ActiveMQServers.newActiveMQServer("broker.xml", null, securityManager);
server.start();
I have a few applications that produce and consume messages. How do I configure these applications so that they communicate with the embedded server to produce and consume messages? When I use tcp://localhost:61616 as the broker URL, I am not able to connect to the embedded server. What configuration changes should I make in order to achieve this?
This is the only acceptor you have:
<acceptor name="in-vm">vm://0</acceptor>
Therefore this is the only URL you can use from your clients: vm://0.
If you want to connect to tcp://localhost:61616 then you need to configure a corresponding acceptor, e.g.:
<acceptor name="activemq">tcp://localhost:61616</acceptor>

How to configure correctly an authentication using Tomcat 10?

I'm using Tomcat 10 and Eclipse to develop a Java EE (or Jakarta EE) web application. I followed this tutorial (http://objis.com/tutoriel-securite-declarative-jee-avec-jaas/#partie2), which seems old (it's a French document; I'm French, sorry if my English isn't perfect), but I also read the Tomcat 10 documentation.
The DataSource works; I followed the instructions on this page (https://tomcat.apache.org/tomcat-10.0-doc/jndi-datasource-examples-howto.html#Oracle_8i,_9i_&_10g) and tested it. But it seems that the realm doesn't work, because I can't log in successfully. I always get an authentication error, even when I use the correct login and password.
I tried a lot of "solutions" to correct this, but none of them work. I still don't know whether I have to put the Realm tag inside context.xml, server.xml, or both. I tried context.xml and both, but I don't see any difference.
My web.xml:
<?xml version="1.0" encoding="UTF-8"?>
<web-app
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://Java.sun.com/xml/ns/javaee"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd"
version="3.0">
<!-- Servlet -->
<servlet>
<servlet-name>Accueil</servlet-name>
<servlet-class>servlet.Accueil</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Accueil</servlet-name>
<url-pattern></url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Bar</servlet-name>
<servlet-class>servlet.Bar</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Bar</servlet-name>
<url-pattern>/bar</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Galerie</servlet-name>
<servlet-class>servlet.Galerie</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Galerie</servlet-name>
<url-pattern>/galerie</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Cave</servlet-name>
<servlet-class>servlet.Cave</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Cave</servlet-name>
<url-pattern>/cave</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Mentions</servlet-name>
<servlet-class>servlet.Mentions</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Mentions</servlet-name>
<url-pattern>/mentions-legales</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Plan</servlet-name>
<servlet-class>servlet.Plan</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Plan</servlet-name>
<url-pattern>/plan-acces</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Restaurant</servlet-name>
<servlet-class>servlet.Restaurant</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Restaurant</servlet-name>
<url-pattern>/restaurant</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>Catalogue</servlet-name>
<servlet-class>servlet.catalogue</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>Catalogue</servlet-name>
<url-pattern>/catalogue</url-pattern>
</servlet-mapping>
<servlet>
<servlet-name>AdminCatalogue</servlet-name>
<servlet-class>servlet.AdminCatalogue</servlet-class>
</servlet>
<servlet-mapping>
<servlet-name>AdminCatalogue</servlet-name>
<url-pattern>/admin/administration-catalogue</url-pattern>
</servlet-mapping>
<security-constraint>
<display-name>Test authentification Tomcat</display-name>
<!-- List of protected pages -->
<web-resource-collection>
<web-resource-name>Page sécurisée</web-resource-name>
<url-pattern>/admin/*</url-pattern>
</web-resource-collection>
<!-- Roles of the users allowed to access them -->
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
<!-- Secure connection -->
<!-- <user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint> -->
</security-constraint>
<!-- Authentication configuration -->
<login-config>
<auth-method>FORM</auth-method>
<realm-name>Espace administration</realm-name>
<form-login-config>
<form-login-page>/WEB-INF/login.jsp</form-login-page>
<form-error-page>/WEB-INF/erreur-authentification.jsp</form-error-page>
</form-login-config>
</login-config>
<!-- Roles used in the application -->
<security-role>
<description>Administrateur</description>
<role-name>admin</role-name>
</security-role>
<!-- Prepends taglibs.jsp to every JSP -->
<jsp-config>
<jsp-property-group>
<url-pattern>*.jsp</url-pattern>
<include-prelude>/WEB-INF/taglibs.jsp</include-prelude>
</jsp-property-group>
</jsp-config>
<!-- Declaration of a reference to a JNDI data source -->
<resource-ref>
<description>DB Connection</description>
<res-ref-name>jdbc/caradoc</res-ref-name>
<res-type>javax.sql.DataSource</res-type>
<res-auth>Container</res-auth>
</resource-ref>
</web-app>
context.xml:
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- The contents of this file will be loaded for each web application -->
<Context>
<!-- Default set of monitored resources. If one of these changes, the -->
<!-- web application will be reloaded. -->
<WatchedResource>WEB-INF/web.xml</WatchedResource>
<WatchedResource>WEB-INF/tomcat-web.xml</WatchedResource>
<WatchedResource>${catalina.base}/conf/web.xml</WatchedResource>
<!-- Uncomment this to enable session persistence across Tomcat restarts -->
<!--
<Manager pathname="SESSIONS.ser" />
-->
<Resource name="jdbc/caradoc" auth="Container" type="javax.sql.DataSource"
maxTotal="100" maxIdle="30" maxWaitMillis="10000"
username="root" password="Caradoc22600!" driverClassName="com.mysql.jdbc.Driver"
url="jdbc:mysql://localhost:3307/caradoc"/>
<Realm className="org.apache.catalina.realm.DataSourceRealm"
daraSourceName="jdbc/caradoc" localDataSource="true" userTable="utilisateurs"
userRoleTable="roles" userNameCol="login" userCredCol="mdp"
roleNameCol="role">
<CredentialHandler className="org.apache.catalina.realm.SecretKeyCredentialHandler"
algorithm="PBKDF2WithHmacSHA512"
iterations="100000"
keyLength="256"
saltLength="16"
/>
</Realm>
</Context>
server.xml:
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Note: A "Server" is not itself a "Container", so you may not
define subcomponents such as "Valves" at this level.
Documentation at /docs/config/server.html
-->
<Server port="9000" shutdown="SHUTDOWN">
<Listener className="org.apache.catalina.startup.VersionLoggerListener" />
<!-- Security listener. Documentation at /docs/config/listeners.html
<Listener className="org.apache.catalina.security.SecurityListener" />
-->
<!--APR library loader. Documentation at /docs/apr.html -->
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<!-- Prevent memory leaks due to use of particular java/javax APIs-->
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
<!-- Global JNDI resources
Documentation at /docs/jndi-resources-howto.html
-->
<GlobalNamingResources>
<!-- Editable user database that can also be used by
UserDatabaseRealm to authenticate users
-->
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml" />
</GlobalNamingResources>
<!-- A "Service" is a collection of one or more "Connectors" that share
a single "Container" Note: A "Service" is not itself a "Container",
so you may not define subcomponents such as "Valves" at this level.
Documentation at /docs/config/service.html
-->
<Service name="Catalina">
<!--The connectors can use a shared executor, you can define one or more named thread pools-->
<!--
<Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
maxThreads="150" minSpareThreads="4"/>
-->
<!-- A "Connector" represents an endpoint by which requests are received
and responses are returned. Documentation at :
HTTP Connector: /docs/config/http.html
AJP Connector: /docs/config/ajp.html
Define a non-SSL/TLS HTTP/1.1 Connector on port 8080
-->
<Connector port="8080" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443"
URIEncoding = "UTF-8" />
<!-- <Connector port="8443"
protocol="org.apache.coyote.http11.Http11NioProtocol"
maxThreads="150"
SSLEnabled="true"
scheme="https"
secure="true"
clientAuth="false"
sslProtocol="TLS"
keystoreFile="autosigned-cert.keystore"
keyAlias="tomcat"
keystorePass="azertyuiop" /> -->
<!-- A "Connector" using the shared thread pool-->
<!--
<Connector executor="tomcatThreadPool"
port="8080" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443" />
-->
<!-- Define an SSL/TLS HTTP/1.1 Connector on port 8443
This connector uses the NIO implementation. The default
SSLImplementation will depend on the presence of the APR/native
library and the useOpenSSL attribute of the
AprLifecycleListener.
Either JSSE or OpenSSL style configuration may be used regardless of
the SSLImplementation selected. JSSE style configuration is used below.
-->
<!--
<Connector port="8443" protocol="org.apache.coyote.http11.Http11NioProtocol"
maxThreads="150" SSLEnabled="true">
<SSLHostConfig>
<Certificate certificateKeystoreFile="conf/localhost-rsa.jks"
type="RSA" />
</SSLHostConfig>
</Connector>
-->
<!-- Define an AJP 1.3 Connector on port 8009 -->
<!--
<Connector protocol="AJP/1.3"
address="::1"
port="8009"
redirectPort="8443" />
-->
<!-- An Engine represents the entry point (within Catalina) that processes
every request. The Engine implementation for Tomcat stand alone
analyzes the HTTP headers included with the request, and passes them
on to the appropriate Host (virtual host).
Documentation at /docs/config/engine.html -->
<!-- You should set jvmRoute to support load-balancing via AJP ie :
<Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-->
<Engine name="Catalina" defaultHost="localhost">
<!--For clustering, please take a look at documentation at:
/docs/cluster-howto.html (simple how to)
/docs/config/cluster.html (reference documentation) -->
<!--
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-->
<!-- Use the LockOutRealm to prevent attempts to guess user passwords
via a brute-force attack -->
<Realm className="org.apache.catalina.realm.LockOutRealm">
<!-- This Realm uses the UserDatabase configured in the global JNDI
resources under the key "UserDatabase". Any edits
that are performed against this UserDatabase are immediately
available for use by the Realm. -->
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
</Realm>
<Realm className="org.apache.catalina.realm.LockOutRealm">
<Realm className="org.apache.catalina.realm.DataSourceRealm"
daraSourceName="jdbc/caradoc" localDataSource="true" userTable="utilisateurs"
userRoleTable="roles" userNameCol="login" userCredCol="mdp"
roleNameCol="role">
<CredentialHandler className="org.apache.catalina.realm.SecretKeyCredentialHandler"
algorithm="PBKDF2WithHmacSHA512"
iterations="100000"
keyLength="256"
saltLength="16"
/>
</Realm>
</Realm>
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true">
<!-- SingleSignOn valve, share authentication between web applications
Documentation at: /docs/config/valve.html -->
<!--
<Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-->
<!-- Access log processes all example.
Documentation at: /docs/config/valve.html
Note: The pattern used is equivalent to using pattern="common" -->
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="localhost_access_log" suffix=".txt"
pattern="%h %l %u %t "%r" %s %b" />
</Host>
</Engine>
</Service>
</Server>
login.jsp:
<%@ page language="java" contentType="text/html; charset=UTF-8"
pageEncoding="UTF-8"%>
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Connexion Administrateur</title>
</head>
<body>
<div align="center">
<h2>Identification</h2>
</div>
<form action="j_security_check" method="post" accept-charset="utf-8">
<table align="center">
<tr>
<td>Login : </td>
<td><input type="text" name="j_username"/></td>
</tr>
<tr>
<td>Mot de passe : </td>
<td><input type="password" name="j_password"/></td>
</tr>
</table>
<p align="center"><input type="submit" value="Connexion"/></p>
</form>
</body>
</html>
erreur-authentifiction.jsp has the same content as login.jsp, plus an error message.
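Since the form posts to j_security_check, both pages are presumably wired up through FORM authentication in the application's web.xml, along these lines (a minimal sketch; the /admin/* URL pattern and the role name admin are placeholders, not taken from my actual project):
<login-config>
<auth-method>FORM</auth-method>
<form-login-config>
<form-login-page>/login.jsp</form-login-page>
<form-error-page>/erreur-authentifiction.jsp</form-error-page>
</form-login-config>
</login-config>
<security-constraint>
<web-resource-collection>
<web-resource-name>Protected area</web-resource-name>
<url-pattern>/admin/*</url-pattern>
</web-resource-collection>
<auth-constraint>
<role-name>admin</role-name>
</auth-constraint>
</security-constraint>
<security-role>
<role-name>admin</role-name>
</security-role>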
User table (password hashes obtained with digest.bat): [screenshot]
Role table, with a foreign key on login referencing the login column of the user table: [screenshot]
My project's directory structure, in case it helps: [screenshot]
So please, can someone tell me what I did wrong?
EDIT: I verified that the hash produced with the parameters specified in the CredentialHandler tag is correct; it matches the stored value.
This is the Java code I used to check it:
import java.security.NoSuchAlgorithmException;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.KeySpec;

import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;

public class test {

    private static final char[] HEX_ARRAY = "0123456789ABCDEF".toCharArray();

    // Decodes a hex string (e.g. the salt stored in the database) into raw bytes.
    public static byte[] hexStringToByteArray(String s) {
        int len = s.length();
        byte[] data = new byte[len / 2];
        for (int i = 0; i < len; i += 2) {
            data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4)
                    + Character.digit(s.charAt(i + 1), 16));
        }
        return data;
    }

    // Encodes raw bytes as an upper-case hex string, like the credential column.
    public static String bytesToHex(byte[] bytes) {
        char[] hexChars = new char[bytes.length * 2];
        for (int j = 0; j < bytes.length; j++) {
            int v = bytes[j] & 0xFF;
            hexChars[j * 2] = HEX_ARRAY[v >>> 4];
            hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
        }
        return new String(hexChars);
    }

    public static void main(String[] args) throws NoSuchAlgorithmException, InvalidKeySpecException {
        String password = "password";
        byte[] salt = hexStringToByteArray("e0cfcb0169f81fc46c861ecefeb7446b");
        // Same parameters as the CredentialHandler: PBKDF2WithHmacSHA512,
        // 100000 iterations, 256-bit key.
        KeySpec spec = new PBEKeySpec(password.toCharArray(), salt, 100000, 256);
        SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512");
        byte[] hash = factory.generateSecret(spec).getEncoded();
        String res = bytesToHex(hash);
        System.out.println(res);
    }
}
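(Compiled and run, e.g. with javac test.java followed by java test, assuming the file is saved as test.java, it prints the derived hash as upper-case hex.)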
I obtained the same encodedCredential as the one stored in the database ("33D6898C30FBE3E48B9A9EA2D5A0DAD01FD8FD809C9E6A6F3911BB23A481FB0F").
I also got these logs concerning the realm:
juin 10, 2021 1:07:14 PM org.apache.catalina.realm.DataSourceRealm open
SEVERE: Exception lors de l'anthentification
java.lang.NullPointerException: Cannot invoke "String.length()" because "n" is null
at java.naming/javax.naming.NameImpl.<init>(NameImpl.java:283)
at java.naming/javax.naming.CompositeName.<init>(CompositeName.java:237)
at org.apache.naming.NamingContext.lookup(NamingContext.java:174)
at org.apache.catalina.realm.DataSourceRealm.open(DataSourceRealm.java:385)
at org.apache.catalina.realm.DataSourceRealm.authenticate(DataSourceRealm.java:255)
at org.apache.catalina.authenticator.FormAuthenticator.doAuthenticate(FormAuthenticator.java:244)
at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:633)
at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:127)
at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:92)
at org.apache.catalina.valves.AbstractAccessLogValve.invoke(AbstractAccessLogValve.java:690)
at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:78)
at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:353)
at org.apache.coyote.http11.Http11Processor.service(Http11Processor.java:374)
at org.apache.coyote.AbstractProcessorLight.process(AbstractProcessorLight.java:65)
at org.apache.coyote.AbstractProtocol$ConnectionHandler.process(AbstractProtocol.java:870)
at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1696)
at org.apache.tomcat.util.net.SocketProcessorBase.run(SocketProcessorBase.java:49)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)
at java.base/java.lang.Thread.run(Thread.java:832)
As Piotr P. Karwasz pointed out, I had misspelled dataSourceName (as daraSourceName) in both context.xml and server.xml; because the attribute was never set, the realm tried to look up a null JNDI name, which is what produced the NullPointerException above. I feel bad that I didn't notice it.
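For reference, here is the Realm from context.xml with only the attribute name corrected (the copy in server.xml needs the same fix):
<Realm className="org.apache.catalina.realm.DataSourceRealm"
dataSourceName="jdbc/caradoc" localDataSource="true" userTable="utilisateurs"
userRoleTable="roles" userNameCol="login" userCredCol="mdp"
roleNameCol="role">
<CredentialHandler className="org.apache.catalina.realm.SecretKeyCredentialHandler"
algorithm="PBKDF2WithHmacSHA512"
iterations="100000"
keyLength="256"
saltLength="16"
/>
</Realm>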
But I still have one question: in which file should I put the Realm element?