Exception thrown for large number of Vert.x clients connecting to Redis - redis

I'm trying to simulate a heavy-load scenario with Redis (default config only).
To keep it simple, as soon as multi is issued I immediately execute and then close the connection.
import io.vertx.core.*;
import io.vertx.core.json.Json;
import io.vertx.redis.RedisClient;
import io.vertx.redis.RedisOptions;
import io.vertx.redis.RedisTransaction;
class MyVerticle extends AbstractVerticle {
private int index;
public MyVerticle(int index) {
this.index = index;
}
private void run2() {
RedisClient client = RedisClient.create(vertx, new RedisOptions().setHost("127.0.0.1"));
RedisTransaction tr = client.transaction();
tr.multi(ev2 -> {
if (ev2.succeeded()) {
tr.exec(ev3 -> {
if (ev3.succeeded()) {
tr.close(i -> {
if (i.failed()) {
System.out.println("FAIL TR CLOSE");
client.close(j -> {
if (j.failed()) {
System.out.println("FAIL CLOSE");
}
});
}
});
}
else {
System.out.println("FAIL EXEC");
tr.close(i -> {
if (i.failed()) {
System.out.println("FAIL TR CLOSE");
client.close(j -> {
if (j.failed()) {
System.out.println("FAIL CLOSE");
}
});
}
});
}
});
}
else {
System.out.println("FAIL MULTI");
tr.close(i -> {
if (i.failed()) {
client.close(j -> {
if (j.failed()) {
System.out.println("FAIL CLOSE");
}
});
}
});
}
});
}
@Override
public void start(Future<Void> startFuture) {
long timerID = vertx.setPeriodic(1, new Handler<Long>() {
public void handle(Long aLong) {
run2();
}
});
}
@Override
public void stop(Future stopFuture) throws Exception {
System.out.println("MyVerticle stopped!");
}
}
public class Periodic {
public static void main(String[] args) {
Vertx vertx = Vertx.vertx();
for (int i = 0; i < 8000; i++) {
vertx.deployVerticle(new MyVerticle(i));
}
}
}
Although the connections are closed properly, I still get warning errors.
All of them are thrown even before I put more logic inside multi.
2017-06-20 16:29:49 WARNING io.netty.util.concurrent.DefaultPromise notifyListener0 An exception was thrown by io.vertx.core.net.impl.ChannelProvider$$Lambda$61/1899599620.operationComplete()
java.lang.IllegalStateException: Uh oh! Event loop context executing with wrong thread! Expected null got Thread[globalEventExecutor-1-2,5,main]
at io.vertx.core.impl.ContextImpl.lambda$wrapTask$2(ContextImpl.java:316)
at io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:193)
at io.vertx.core.net.impl.NetClientImpl.failed(NetClientImpl.java:258)
at io.vertx.core.net.impl.NetClientImpl.lambda$connect$5(NetClientImpl.java:233)
at io.vertx.core.net.impl.ChannelProvider.lambda$connect$0(ChannelProvider.java:42)
at io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:507)
at io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:481)
at io.netty.util.concurrent.DefaultPromise.access$000(DefaultPromise.java:34)
at io.netty.util.concurrent.DefaultPromise$1.run(DefaultPromise.java:431)
at io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:233)
at io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:144)
at java.lang.Thread.run(Thread.java:745)
Is there a reason for this error?

You'll continue to get errors, because you're testing the wrong things.
First of all, verticles are not fat coroutines. They are thin actors. That means creating 500 of them won't speed things up; it will probably slow everything down, because the event loop still needs to switch between them.
Second, if you want to prepare for 2K concurrent requests, put your Vert.x application on one machine and run wrk or a similar tool against it over the network.
Third, your Redis is also on the same machine. I hope that won't be the case in production, since Redis will compete with Vert.x for CPU.
Once everything is set up correctly, I believe you'll be able to handle 10K requests quite easily. I've seen Vert.x handle 8K requests on modest machines with PostgreSQL.
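For reference, here is a minimal sketch of scaling by verticle instances instead of deploying thousands of them. The instance count is illustrative, and it assumes the verticle has a no-arg constructor (deploying by class name requires one):
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;

public class Deployer {
    public static void main(String[] args) {
        Vertx vertx = Vertx.vertx();
        // Deploy roughly one instance per CPU core and let Vert.x distribute
        // work across them, instead of creating 8000 verticles.
        // Deploying by class name is required when instances > 1, and it
        // assumes MyVerticle has a no-arg constructor (an assumption here).
        vertx.deployVerticle("MyVerticle",
            new DeploymentOptions().setInstances(Runtime.getRuntime().availableProcessors()),
            res -> {
                if (res.failed()) {
                    res.cause().printStackTrace();
                }
            });
    }
}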

Related

boost::asio: how can I make some clients listen to the server and other clients read/write to the server at the same time

I am a novice with boost::asio. I wrote a server that some clients can connect to and keep listening.
class socket_server {
public:
~socket_server() { io_context.stop(); };
int server_process();
private:
boost::asio::io_context io_context;
};
int socket_server::server_process() {
try {
unlink("/var/run/socket");
server s(io_context, "/var/run/socket");
INFO("server_process, start run\n");
io_context.run();
} catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
class server {
public:
server(boost::asio::io_context &io_context, const std::string &file)
: acceptor_(io_context, stream_protocol::endpoint(file)), socket_id_(0) {
do_accept();
}
private:
void do_accept();
stream_protocol::acceptor acceptor_;
int socket_id_;
};
void server::do_accept() {
INFO("do accept\n");
acceptor_.async_accept(
[this](std::error_code ec, stream_protocol::socket socket) {
if (!ec) {
INFO("new session create\n");
std::make_shared<session>(std::move(socket), socket_id_++)->start();
}
do_accept();
});
}
class session : public std::enable_shared_from_this<session> {
public:
session(stream_protocol::socket sock, int socket_id)
: socket_(std::move(sock)), socket_id_(socket_id) {}
~session() { socket_id_--; }
void start();
private:
void do_read();
void do_write(std::array<char, 1024> data);
int get_id() { return socket_id_; }
// The socket used to communicate with the client.
stream_protocol::socket socket_;
// Buffer used to store data received from the client.
std::array<char, 1024> data_;
int socket_id_;
};
void session::start() { do_read(); }
void session::do_read() {
INFO("in do_read\n");
auto self(shared_from_this());
socket_.async_read_some(
boost::asio::buffer(data_),
[this, self](std::error_code ec, std::size_t length) {
if (!ec) {
if (request.find("listen") != std::string::npos) {
std::unique_lock<std::mutex> lock(unsol_mutex);
unsol_cond.wait(lock);
do_write(get_unsol_data());
} else {
std::unique_lock<std::mutex> lock(send_mutex);
if (send_cond.wait_for(lock, std::chrono::seconds(2)) ==
std::cv_status::timeout) {
ERROR("response time out\n");
}
do_write(get_write_data());
}
}
});
}
In do_read(), I found that when a client is listening (blocked in unsol_cond.wait(lock)), another client cannot get into do_read().
Is it due to the make_shared session? Is there a better implementation suggestion?
Thanks~
You're using blocking synchronization primitives in async code. That's an anti-pattern.
Firstly, as you noticed, the blocking operations will prevent the event loop from progressing.
Secondly, holding locks across async calls is often a bug (the lock doesn't actually guard the critical section while the async operation is in flight).
For simple integration with Asio's proactor model, you can often use a strand instead.
Under the hood, it will end up using mutexes, just like now, but only if the concurrency model requires it. That mainly depends on the execution context used and/or how many threads are running the services.
Use a queue with an async send-chain. I have quite a few answers on this site that show how to do that.
I would gladly demonstrate, but the code is too incomplete, and the naming doesn't really give me an idea of what things mean ("listen"/"unsol"? Nothing ever signals those conditions, so it's hard to guess what they do in reality).

How to start an API app in IntelliJ without Tomcat or any other type of server?

I have an API app created using the Vert.x framework. I am able to build the application but not able to run it. When I try to run the app, I automatically get redirected to the "cucumber.api.cli.main not found" error. I delete the automatically generated run configuration, but the next time I try to run the app it gets generated again. What configuration should I run with?
I have tried to research this, but most of the questions and answers ask me to set up a Tomcat or GlassFish server, which I don't want to do.
Here is my hello world application using IntelliJ IDEA and Vert.x.
Verticle :
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Future;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.ext.web.Router;
import static com.sun.xml.internal.ws.spi.db.BindingContextFactory.LOGGER;
public class MyVerticle extends AbstractVerticle {
@Override
public void start(Future<Void> startFuture) throws Exception {
Router router = Router.router(vertx);
router.route("/").handler(routingContext -> {
HttpServerResponse response = routingContext.response();
response.putHeader("content-type", "text/html")
.end("<h1> Hello Vert.x </h>");
});
vertx.createHttpServer().requestHandler(router::accept)
.listen(8070, http -> {
if (http.succeeded()) {
LOGGER.info("Started Server at port 8070");
} else {
startFuture.fail(http.cause());
}
});
vertx.createHttpServer().requestHandler(req -> {
req.response()
.putHeader("content-type", "text/plain")
.end("Hello from Vert.x!");
}).listen(8888, http -> {
if (http.succeeded()) {
startFuture.complete();
System.out.println("HTTP server started on port 8888");
} else {
startFuture.fail(http.cause());
}
});
router.route("/test").handler(routingContext -> {
HttpServerResponse response = routingContext.response();
response.putHeader("content-type","text/html")
.end("<h2> This is another end point with same port </h2>");
});
vertx.createHttpServer().requestHandler(router::accept).listen(8070,http ->{
if(http.succeeded()){
LOGGER.info("Another server started 8070");
}else{
startFuture.fail(http.cause());
}
});
}
@Override
public void stop() {
LOGGER.info("Shutting down application");
}
}
Main Method to deploy Verticle
import com.testproject.starter.verticles.MyVerticle;
import io.vertx.core.Vertx;
public class MyVerticleTest {
public static void main(String[] args) {
Vertx vertex = Vertx.vertx();
MyVerticle myVerticle = new MyVerticle();
vertex.deployVerticle(myVerticle);
}
}
Now you can open the URLs below -
1. http://localhost:8888
2. http://localhost:8070/test
The application doesn't require Tomcat to run.
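If you'd rather not maintain a hand-written main method, another option (a sketch, assuming Vert.x 3, whose io.vertx.core.Launcher class is on the classpath) is to point the IntelliJ run configuration at the Launcher, or delegate to it:
import io.vertx.core.Launcher;

public class Runner {
    public static void main(String[] args) {
        // Delegates to Vert.x's own CLI; equivalent to running
        // "vertx run com.testproject.starter.verticles.MyVerticle" from the command line.
        Launcher.executeCommand("run", "com.testproject.starter.verticles.MyVerticle");
    }
}
Either way, Vert.x embeds its own HTTP server, so no Tomcat or GlassFish is involved.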
Reference : https://vertx.io/docs/
Useful links - https://github.com/vert-x3/vertx-awesome

Accessing Context inside an ExchangeFilterFunction

For some reason the context inside the doAfterSuccessOrError method is not populated from upstream. I've tried to access it using Mono.subscriberContext() (see the snippet). I would expect it to be present, but for some reason it is not. Am I doing something wrong?
public class LoggingRequestExchangeFunction implements ExchangeFilterFunction {
private final Logger log = LoggerFactory.getLogger(getClass());
@Override
public Mono<ClientResponse> filter(ClientRequest request, ExchangeFunction next) {
long start = System.currentTimeMillis();
return next.exchange(request).doAfterSuccessOrError((res, ex) -> {
Mono.subscriberContext().map((ctx -> {
log.info("doAfterSuccessOrError Context {}",ctx);
// log req/res ...
return ctx;
})).subscribe();
}).subscriberContext( ctx -> {
log.info("SubscriberContext: {}" , ctx);
return ctx;
});
}
}
Here is a log output
23:16:59.426 INFO [reactor-http-epoll-2] .p.c.LoggingRequestExchangeFunction [] SubscriberContext: Context1{nexmo-tracing-context=TracingContext{{traceId=f04961da-933a-4d1d-85d5-3bea2c47432f, clientIp=N/A}}}
23:16:59.589 INFO [reactor-http-epoll-2] .p.c.LoggingRequestExchangeFunction [] doAfterSuccessOrError Context Context0{}
The reason is that you create a new Mono inside doAfterSuccessOrError which is independent of the original reactive chain, since you subscribe to it separately.
If you just want to log something there, an alternative is the doOnEach operator, which gives you access to the context as well as the signal type.
Mono.just("hello")
.doOnEach((signal) ->
{
if (signal.isOnError() || signal.isOnComplete())
{
Context ctx = signal.getContext();
log.info("doAfterSuccessOrError Context {}",ctx);
// log req/res ...
}
})
.subscriberContext( ctx -> {
log.info("SubscriberContext: {}" , ctx);
return ctx;
})
.subscribe();
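Applied back to your filter, a rough sketch (keeping the pre-Reactor-3.4 subscriberContext API from your snippet) could look like this:
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.reactive.function.client.ClientRequest;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.ExchangeFilterFunction;
import org.springframework.web.reactive.function.client.ExchangeFunction;
import reactor.core.publisher.Mono;

public class LoggingRequestExchangeFunction implements ExchangeFilterFunction {
    private final Logger log = LoggerFactory.getLogger(getClass());

    @Override
    public Mono<ClientResponse> filter(ClientRequest request, ExchangeFunction next) {
        long start = System.currentTimeMillis();
        return next.exchange(request)
            .doOnEach(signal -> {
                if (signal.isOnNext() || signal.isOnError()) {
                    // The subscriber context travels with the signal, so no
                    // separate subscribe() is needed here.
                    log.info("doOnEach Context {} after {} ms",
                        signal.getContext(), System.currentTimeMillis() - start);
                }
            })
            .subscriberContext(ctx -> {
                log.info("SubscriberContext: {}", ctx);
                return ctx;
            });
    }
}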

Stop polling files when RabbitMQ is down: Spring Integration

I'm working on a project where we poll files from an SFTP server and stream them out as objects onto a RabbitMQ queue. When RabbitMQ is down, the poller still polls and deletes the file from the server, so the file is lost while being sent to the queue. I'm using ExpressionEvaluatingRequestHandlerAdvice to remove the file on successful transformation. My code looks like this:
@Bean
public SessionFactory<ChannelSftp.LsEntry> sftpSessionFactory() {
DefaultSftpSessionFactory factory = new DefaultSftpSessionFactory(true);
factory.setHost(sftpProperties.getSftpHost());
factory.setPort(sftpProperties.getSftpPort());
factory.setUser(sftpProperties.getSftpPathUser());
factory.setPassword(sftpProperties.getSftpPathPassword());
factory.setAllowUnknownKeys(true);
return new CachingSessionFactory<>(factory);
}
@Bean
public SftpRemoteFileTemplate sftpRemoteFileTemplate() {
return new SftpRemoteFileTemplate(sftpSessionFactory());
}
@Bean
@InboundChannelAdapter(channel = TransformerChannel.TRANSFORMER_OUTPUT, autoStartup = "false",
poller = @Poller(value = "customPoller"))
public MessageSource<InputStream> sftpMessageSource() {
SftpStreamingMessageSource messageSource = new SftpStreamingMessageSource(sftpRemoteFileTemplate,
null);
messageSource.setRemoteDirectory(sftpProperties.getSftpDirPath());
messageSource.setFilter(new SftpPersistentAcceptOnceFileListFilter(new SimpleMetadataStore(),
"streaming"));
messageSource.setFilter(new SftpSimplePatternFileListFilter("*.txt"));
return messageSource;
}
@Bean
@Transformer(inputChannel = TransformerChannel.TRANSFORMER_OUTPUT,
outputChannel = SFTPOutputChannel.SFTP_OUTPUT,
adviceChain = "deleteAdvice")
public org.springframework.integration.transformer.Transformer transformer() {
return new SFTPTransformerService("UTF-8");
}
@Bean
public ExpressionEvaluatingRequestHandlerAdvice deleteAdvice() {
ExpressionEvaluatingRequestHandlerAdvice advice = new ExpressionEvaluatingRequestHandlerAdvice();
advice.setOnSuccessExpressionString(
"#sftpRemoteFileTemplate.remove(headers['file_remoteDirectory'] + headers['file_remoteFile'])");
advice.setPropagateEvaluationFailures(false);
return advice;
}
I don't want the files to get polled/removed from the remote SFTP server when the RabbitMQ server is down. How can I achieve this?
UPDATE
Apologies for not mentioning that I'm using the Spring Cloud Stream Rabbit binder. Here is the transformer service:
public class SFTPTransformerService extends StreamTransformer {
public SFTPTransformerService(String charset) {
super(charset);
}
@Override
protected Object doTransform(Message<?> message) throws Exception {
String fileName = message.getHeaders().get("file_remoteFile", String.class);
Object fileContents = super.doTransform(message);
return new customFileDTO(fileName, (String) fileContents);
}
}
UPDATE-2
I added a TransactionSynchronizationFactory on the customPoller as suggested. Now it doesn't poll the file when the Rabbit server is down, but when the server is up, it keeps polling the same file over and over again! I cannot figure out why. I guess I cannot use PollerSpec because I'm on version 4.3.2.
@Bean(name = "customPoller")
public PollerMetadata pollerMetadataDTX(StartStopTrigger startStopTrigger,
CustomTriggerAdvice customTriggerAdvice) {
PollerMetadata pollerMetadata = new PollerMetadata();
pollerMetadata.setAdviceChain(Collections.singletonList(customTriggerAdvice));
pollerMetadata.setTrigger(startStopTrigger);
pollerMetadata.setMaxMessagesPerPoll(Long.valueOf(sftpProperties.getMaxMessagePoll()));
ExpressionEvaluatingTransactionSynchronizationProcessor syncProcessor =
new ExpressionEvaluatingTransactionSynchronizationProcessor();
syncProcessor.setBeanFactory(applicationContext.getAutowireCapableBeanFactory());
syncProcessor.setBeforeCommitChannel(
applicationContext.getBean(TransformerChannel.TRANSFORMER_OUTPUT, MessageChannel.class));
syncProcessor
.setAfterCommitChannel(
applicationContext.getBean(SFTPOutputChannel.SFTP_OUTPUT, MessageChannel.class));
syncProcessor.setAfterCommitExpression(new SpelExpressionParser().parseExpression(
"#sftpRemoteFileTemplate.remove(headers['file_remoteDirectory'] + headers['file_remoteFile'])"));
DefaultTransactionSynchronizationFactory defaultTransactionSynchronizationFactory =
new DefaultTransactionSynchronizationFactory(syncProcessor);
pollerMetadata.setTransactionSynchronizationFactory(defaultTransactionSynchronizationFactory);
return pollerMetadata;
}
I don't know if you need this info, but my CustomTriggerAdvice and StartStopTrigger look like this:
@Component
public class CustomTriggerAdvice extends AbstractMessageSourceAdvice {
@Autowired private StartStopTrigger startStopTrigger;
@Override
public boolean beforeReceive(MessageSource<?> source) {
return true;
}
@Override
public Message<?> afterReceive(Message<?> result, MessageSource<?> source) {
if (result == null) {
if (startStopTrigger.getStart()) {
startStopTrigger.stop();
}
} else {
if (!startStopTrigger.getStart()) {
startStopTrigger.stop();
}
}
return result;
}
}
public class StartStopTrigger implements Trigger {
private PeriodicTrigger startTrigger;
private boolean start;
public StartStopTrigger(PeriodicTrigger startTrigger, boolean start) {
this.startTrigger = startTrigger;
this.start = start;
}
@Override
public Date nextExecutionTime(TriggerContext triggerContext) {
if (!start) {
return null;
}
start = true;
return startTrigger.nextExecutionTime(triggerContext);
}
public void stop() {
start = false;
}
public void start() {
start = true;
}
public boolean getStart() {
return this.start;
}
}
Well, it would be great to see your SFTPTransformerService, to determine how the onSuccessExpression can run at all when there should be an exception in the case of a down broker.
You should also not only throw an exception and skip the delete, but also consider adding a RequestHandlerRetryAdvice to re-send the file to RabbitMQ: https://docs.spring.io/spring-integration/docs/5.0.6.RELEASE/reference/html/messaging-endpoints-chapter.html#retry-advice
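For example, a minimal sketch of such an advice bean (the retry and backoff values are assumptions, not taken from your configuration); it could then be added to the transformer's adviceChain alongside deleteAdvice:
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.integration.handler.advice.RequestHandlerRetryAdvice;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;

@Configuration
public class RetryAdviceConfig {

    @Bean
    public RequestHandlerRetryAdvice retryAdvice() {
        // Illustrative policy: up to 5 attempts with exponential backoff
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(new SimpleRetryPolicy(5));
        ExponentialBackOffPolicy backOff = new ExponentialBackOffPolicy();
        backOff.setInitialInterval(1000);
        backOff.setMultiplier(2.0);
        retryTemplate.setBackOffPolicy(backOff);

        RequestHandlerRetryAdvice advice = new RequestHandlerRetryAdvice();
        advice.setRetryTemplate(retryTemplate);
        return advice;
    }
}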
UPDATE
So, since Gary guessed that you use Spring Cloud Stream to send messages to the Rabbit binder after your internal process (a pity that you didn't share that information originally), you need to take a look at the binder's error handling on the matter: https://docs.spring.io/spring-cloud-stream/docs/Elmhurst.RELEASE/reference/htmlsingle/#_retry_with_the_rabbitmq_binder
It is true that the ExpressionEvaluatingRequestHandlerAdvice is applied only to the SFTPTransformerService and nothing more. The downstream error (in the binder) is not included in that process.
UPDATE 2
Yeah... I think Gary is right, and we have no choice but to configure a TransactionSynchronizationFactory at the customPoller level instead of that ExpressionEvaluatingRequestHandlerAdvice.
The DefaultTransactionSynchronizationFactory can be configured with an ExpressionEvaluatingTransactionSynchronizationProcessor, which has a similar goal to the mentioned ExpressionEvaluatingRequestHandlerAdvice, but at the transaction level, which will include your whole flow starting with the SFTP inbound channel adapter and ending at the Rabbit binder with the send-to-AMQP attempts.
See Reference Manual for more information: https://docs.spring.io/spring-integration/reference/html/transactions.html#transaction-synchronization.
The point with the ExpressionEvaluatingRequestHandlerAdvice (and any AbstractRequestHandlerAdvice) is that it has a boundary only around the handleRequestMessage() method, and therefore only around the component on which it is declared.

Load external properties files into EJB 3 app running on WebLogic 11

I'm researching the best way to load external properties files from an EJB 3 app whose EAR file is deployed to WebLogic.
I was thinking about using an init servlet, but I read somewhere that it would be too slow (e.g. my message handler might receive a message from my JMS queue before the init servlet runs).
Suppose I have multiple property files or one file here:
~/opt/conf/
So far, I feel that the best possible solution is to use a WebLogic application lifecycle event, where the code reads the properties files during pre-start:
import weblogic.application.ApplicationLifecycleListener;
import weblogic.application.ApplicationLifecycleEvent;
public class MyListener extends ApplicationLifecycleListener {
public void preStart(ApplicationLifecycleEvent evt) {
// Load properties files
}
}
See: http://download.oracle.com/docs/cd/E13222_01/wls/docs90/programming/lifecycle.html
What would happen if the server is already running? Would post-start be a viable solution?
Can anyone think of any better alternatives?
It really depends on how often you want the properties to be reloaded. One approach I have taken is to have a properties file wrapper (singleton) with a configurable parameter that defines how often the files should be reloaded. I would then always read properties through that wrapper, and it would reload the properties every 15 minutes (similar to Log4J's configureAndWatch). That way, if I wanted to, I could change properties without changing the state of a deployed application.
This also allows you to load properties from a database, instead of a file. That way you can have a level of confidence that properties are consistent across the nodes in a cluster and it reduces complexity associated with managing a config file for each node.
I prefer that over tying it to a lifecycle event. If you weren't ever going to change them, then make them static constants somewhere :)
Here is an example implementation to give you an idea:
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;
/**
* User: jeffrey.a.west
* Date: Jul 1, 2011
* Time: 8:43:55 AM
*/
public class ReloadingProperties
{
private final String lockObject = "LockMe";
private long lastLoadTime = 0;
private long reloadInterval;
private String filePath;
private Properties properties;
private static final Map<String, ReloadingProperties> instanceMap;
private static final long DEFAULT_RELOAD_INTERVAL = 1000 * 60 * 5;
public static void main(String[] args)
{
ReloadingProperties props = ReloadingProperties.getInstance("myProperties.properties");
System.out.println(props.getProperty("example"));
try
{
Thread.sleep(6000);
}
catch (InterruptedException e)
{
e.printStackTrace();
}
System.out.println(props.getProperty("example"));
}
static
{
instanceMap = new HashMap<String, ReloadingProperties>(31);
}
public static ReloadingProperties getInstance(String filePath)
{
ReloadingProperties instance = instanceMap.get(filePath);
if (instance == null)
{
instance = new ReloadingProperties(filePath, DEFAULT_RELOAD_INTERVAL);
synchronized (instanceMap)
{
instanceMap.put(filePath, instance);
}
}
return instance;
}
private ReloadingProperties(String filePath, long reloadInterval)
{
this.reloadInterval = reloadInterval;
this.filePath = filePath;
}
private void checkRefresh()
{
long currentTime = System.currentTimeMillis();
long sinceLastLoad = currentTime - lastLoadTime;
if (properties == null || sinceLastLoad > reloadInterval)
{
System.out.println("Reloading!");
lastLoadTime = System.currentTimeMillis();
Properties newProperties = new Properties();
FileInputStream fileIn = null;
synchronized (lockObject)
{
try
{
fileIn = new FileInputStream(filePath);
newProperties.load(fileIn);
}
catch (FileNotFoundException e)
{
e.printStackTrace();
}
catch (IOException e)
{
e.printStackTrace();
}
finally
{
if (fileIn != null)
{
try
{
fileIn.close();
}
catch (IOException e)
{
e.printStackTrace();
}
}
}
properties = newProperties;
}
}
}
public String getProperty(String key, String defaultValue)
{
checkRefresh();
return properties.getProperty(key, defaultValue);
}
public String getProperty(String key)
{
checkRefresh();
return properties.getProperty(key);
}
}
Figured it out...
See the corresponding / related post on Stack Overflow.