We have a large quantity of data that we store in Redis. Actually, we have a large number of keys that we store in Redis and a tiny amount of data associated with each key. The keys are eight bytes long and the data is 8 bytes long (a long value). There are 1 billion keys (yes, billion).
Given the structure of Redis storage, as far as I can find out (https://redislabs.com/blog/redis-ram-ramifications-part-i/ and https://github.com/antirez/sds/blob/master/README.md) given 8 bytes of key there is overhead of 8 bytes for the header and 1 byte for the null at the end of the key. That is 17 bytes. Assuming this rounds up to at least 24 bytes, adding in the long value of 8 bytes gives 32 bytes.
A billion keys would be 32GB. The measured usage is 158GB. There is, of course, overhead but 5:1 ratio seems large. Can anyone explain this or point to ways to reduce memory usage.
I have included my test program based on Jedis.
import java.security.SecureRandom;
import java.text.DecimalFormat;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.exceptions.JedisClusterMaxRedirectionsException;
public class Test8byteKeys {
protected static JedisCluster cluster = null;
protected static final ExecutorService executor;
protected static volatile boolean shuttingDown = false;
private static final int AVAILABLE_PROCESSORS = Runtime.getRuntime().availableProcessors();
static {
final int cores = Math.max(4, (AVAILABLE_PROCESSORS * 3) / 4);
executor = new ThreadPoolExecutor(cores, cores, //
15, TimeUnit.SECONDS, //
new LinkedBlockingQueue<>(cores), //
new ThreadPoolExecutor.CallerRunsPolicy());
System.out.println("Running with " + cores + " threads");
}
static private GenericObjectPoolConfig getPoolConfiguration() {
GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig();
poolConfig.setLifo(true);
poolConfig.setTestOnBorrow(true);
poolConfig.setTestOnReturn(false);
poolConfig.setBlockWhenExhausted(true);
poolConfig.setMinIdle(1);
poolConfig.setMaxTotal(101);
poolConfig.setTestWhileIdle(false);
poolConfig.setSoftMinEvictableIdleTimeMillis(3000L);
poolConfig.setNumTestsPerEvictionRun(5);
poolConfig.setTimeBetweenEvictionRunsMillis(5000L);
poolConfig.setJmxEnabled(true);
return poolConfig;
}
private static void connectToCluster() {
try {
Set<HostAndPort> nodes = new HashSet<>();
String hap /* host and port */ = System.getProperty("hap", null);
if (hap == null) {
System.err.println("You must supply the host and port of a master in the cluster on the command line");
System.err.println("java -Dhap=<host:port> -jar <jar> ");
System.exit(1);
}
String[] parts = hap.split(":"); // assume ipv4 address
nodes.add(new HostAndPort(parts[0].trim(), Integer.valueOf(parts[1].trim())));
System.out.println("Connecting to " + hap);
cluster = new JedisCluster(nodes, getPoolConfiguration());
}
catch (Exception e) {
System.err.println("Could not connect to redis -- " + e.getMessage());
System.exit(1);
}
}
private static final Thread shutdown = new Thread(new Runnable() {
// Clean up at exit
#Override
public void run() {
shuttingDown = true;
System.out.println((new Date()).toString() + "\t" + "Executor shutdown in progress");
try {
executor.shutdown();
executor.awaitTermination(10L, TimeUnit.SECONDS);
}
catch (Exception e) {
// ignore
}
finally {
try {
if (!executor.isShutdown()) {
executor.shutdownNow();
}
}
catch (Exception e) {
//ignore
}
}
try {
cluster.close();
}
catch (Exception e) {
System.err.println("cluster disconnection failure: " + e);
}
finally {
//
}
System.out.println((new Date()).toString() + "\t" + "shutdown complete.");
}
});
final static char[] CHARACTERS = { //
'0', '1', '2', '3', '4', '5', //
'6', '7', '8', '9', 'a', 'b', //
'c', 'd', 'e', 'f', 'g', 'h', //
'i', 'j', 'k', 'l', 'm', 'n', //
'o', 'p', 'q', 'r', 's', 't', //
'u', 'v', 'w', 'x', 'y', 'z', //
'A', 'B', 'C', 'D', 'E', 'F', //
'G', 'H', 'I', 'J', 'K', 'L', //
'M', 'N', 'O', 'P', 'Q', 'R', //
'S', 'T', 'U', 'V', 'W', 'X', //
'Y', 'Z', '#', '#' //
};
protected final static byte[] KEY_EXISTS_MARKER = { '1' };
static class Runner implements Runnable {
private byte[] key = null;
public Runner(byte[] key) {
this.key = key;
}
#Override
public void run() {
if (!shuttingDown) {
try {
cluster.set(key, KEY_EXISTS_MARKER);
cluster.expire(key, 60 * 60 * 48); // for test purposes, only keep around for 2 days
}
catch (JedisClusterMaxRedirectionsException e) {
System.err.println(
(new Date()).toString() + "\tIGNORING\t" + e + "\t" + "For key " + new String(key));
}
catch (Exception e) {
System.err.println((new Date()).toString() + "\t" + e + "\t" + "For key " + new String(key));
e.printStackTrace();
System.exit(1);
}
}
}
}
public static void main(String[] args) {
SecureRandom random = new SecureRandom();
DecimalFormat decimal = new DecimalFormat("#,##0");
final byte[] randomBytes = new byte[8];
connectToCluster();
Runtime.getRuntime().addShutdownHook(shutdown);
System.out.println((new Date()) + " Starting test");
for (int i = 0; i < 1000000000; i++) {
random.nextBytes(randomBytes);
final byte[] key = new byte[8];
for (int j = 0; j < randomBytes.length; j++)
key[j] = (byte) (CHARACTERS[((randomBytes[j] & 0xFF)) % CHARACTERS.length] & 0xFF);
try {
if (shuttingDown) {
System.err.println((new Date()).toString() + "\t" + "Main loop terminating due to shutdown");
break;
}
if (i % 1000000 == 0)
System.out.println((new Date()).toString() + "\t" + decimal.format(i));
try {
executor.submit(new Runner(key));
}
catch (Exception e) {
System.err.println((new Date()).toString() + "\t" + e);
}
}
catch (Exception e) {
System.err.println("Failed to set key " + new String(key) + " -- " + e);
}
}
if (!shuttingDown) {
System.out.println((new Date()) + " Done");
System.exit(0);
}
}
}
Virtually every memory manager will have internal overhead for every object you allocate, simply to track the object. eg: when you call free(), the memory manager might need the some info about the object to determine which memory pool/page to which it belongs. Small objects might fall into one pool and use a different allocation mechanism than larger objects.
Very similar to how Redis sds.c/sds.h works, the heap manager usually also adds its own structure to every malloc()'d object.
If your heap has an overhead of 16 bytes per object, then adding this to each 10KB malloc() would be an imperceptible overhead. However, if you're talking about 8 byte keys in Redis, then adding 16 bytes of overhead for each 8-byte key would exceed the memory of the keys themselves.
You can find a bit more info about malloc chunks and fastbins here:
http://iarchsys.com/?p=764
A quick and dirty check of this overhead would be to increase your keys from 8 bytes to 16. Although you're doubling the size of memory used by the keys, you will probably not see a doubling of the memory consumed by the Redis process.
This requires deeper analysis, but one thing that's obvious is that the overhead calculation is wrong (probably my fault for not completing the blog series - sorry ;)).
Every key in Redis, regardless its type/name/value, has an overhead. The overhead, IIRC, for v3.2.10 was about 70 bytes. However, that overhead was measured on smaller datasets (much less than 1B keys) and if I'm not mistaken a bigger global dictionary will incur more overhead per key. Add to that the value itself and its string overhead, and you get to 80 bytes easily and about 80GB in toto.
That said, I can't explain the x2 factor without actually recreating this in a lab. It could be that the cluster has additional overheads that need to be considered. I recommend that you begin with a smaller data set and compare the standalone vs. cluster memory usage as your next step in investigating this. Also, you may want to test against the latest version of Redis (4) as it includes several memory-usage related optimizations.
You should consider partitioning your Redis instance into multiple instances.
Related
I have Java code to submit a long SMS over SMPP, but during execution I get a "length must be less than or equal to 254. Actual length is 270" error when using a lengthy string or any Arabic characters.
Can anyone help me to identify the cause and suggest me how to fix the problem.
Below is the code that I'm trying.
import java.io.IOException;
import java.util.Date;
import java.util.Random;
import org.jsmpp.InvalidResponseException;
import org.jsmpp.PDUException;
import org.jsmpp.bean.Alphabet;
import org.jsmpp.bean.BindType;
import org.jsmpp.bean.ESMClass;
import org.jsmpp.bean.GeneralDataCoding;
import org.jsmpp.bean.MessageClass;
import org.jsmpp.bean.NumberingPlanIndicator;
import org.jsmpp.bean.OptionalParameter;
import org.jsmpp.bean.OptionalParameters;
import org.jsmpp.bean.RegisteredDelivery;
import org.jsmpp.bean.SMSCDeliveryReceipt;
import org.jsmpp.bean.TypeOfNumber;
import org.jsmpp.extra.NegativeResponseException;
import org.jsmpp.extra.ResponseTimeoutException;
import org.jsmpp.session.BindParameter;
import org.jsmpp.session.SMPPSession;
import org.jsmpp.util.AbsoluteTimeFormatter;
import org.jsmpp.util.TimeFormatter;
/**
 * Sends a long SMS by splitting it into SAR-segmented submit_sm PDUs.
 *
 * Fix: the original split the text into 135-character segments; encoded as
 * UTF-16BE that is 270 octets per segment, which exceeds the SMPP 3.4
 * sm_length limit of 254 octets and produced "length must be less than or
 * equal to 254. Actual length is 270". Segments are now capped at 127
 * characters (127 * 2 = 254 octets). Messages longer still than this should
 * use the message_payload optional parameter instead.
 */
public class SendLongSMSMessage
{
    /** 254-octet sm_length limit / 2 octets per UTF-16BE character. */
    private static final int MAX_SEGMENT_CHARS = 127;

    private static TimeFormatter timeFormatter = new AbsoluteTimeFormatter();

    /**
     * Splits {@code message} into segments and submits each one to the SMSC.
     *
     * @param MSISDN     destination address
     * @param senderAddr NOTE(review): currently ignored; the source address is
     *                   the hard-coded "9999999999", as in the original code
     * @param message    text to send (may be null; nothing is sent then)
     * @return one message id per submitted segment (empty for a null message)
     * @throws Exception if the session cannot be established
     */
    public String[] submitLongSMS(String MSISDN, String senderAddr, String message) throws Exception
    {
        SMPPSession session = getSession();
        if (message == null)
        {
            // Nothing to send; the original likewise submitted nothing here.
            session.unbindAndClose();
            return new String[0];
        }
        RegisteredDelivery registeredDelivery = new RegisteredDelivery(SMSCDeliveryReceipt.DEFAULT);
        // NOTE(review): the payload below is encoded as UTF-16BE (UCS-2) while
        // data_coding advertises 8-bit data -- confirm what the SMSC expects.
        GeneralDataCoding dataCoding = new GeneralDataCoding(false, false, MessageClass.CLASS1,
            Alphabet.ALPHA_8_BIT);
        ESMClass esmClass = new ESMClass();
        // Always at least 1. The original left this at 0 for messages of 140
        // characters or fewer, so short messages were silently never sent.
        int totalSegments = getTotalSegmentsForTextMessage(message);
        Random random = new Random();
        OptionalParameter sarMsgRefNum = OptionalParameters.newSarMsgRefNum((short) random.nextInt());
        OptionalParameter sarTotalSegments = OptionalParameters.newSarTotalSegments(totalSegments);
        String[] segmentData = splitIntoStringArray(message, MAX_SEGMENT_CHARS, totalSegments);
        String[] msgId = new String[totalSegments];
        for (int i = 0; i < totalSegments; i++)
        {
            int seqNum = i + 1; // SAR segment numbers are 1-based
            OptionalParameter sarSegmentSeqnum = OptionalParameters.newSarSegmentSeqnum(seqNum);
            try
            {
                // 2 octets per char; MAX_SEGMENT_CHARS keeps this <= 254 octets.
                byte[] byteText = segmentData[i].getBytes("UTF-16BE");
                msgId[i] = session.submitShortMessage("", TypeOfNumber.NATIONAL,
                    NumberingPlanIndicator.ISDN, "9999999999", TypeOfNumber.NATIONAL,
                    NumberingPlanIndicator.ISDN, MSISDN, esmClass, (byte) 0, (byte) 0, timeFormatter
                        .format(new Date()), null, registeredDelivery, (byte) 0, dataCoding, (byte) 0,
                    byteText, sarMsgRefNum, sarSegmentSeqnum, sarTotalSegments);
                System.out.println("Message id for segment " + seqNum + " out of totalsegment "
                    + totalSegments + "is" + msgId[i]);
            }
            catch (PDUException e)
            {
                System.out.println("PDUException has occured" + e.getMessage());
            }
            catch (ResponseTimeoutException e)
            {
                System.out.println("ResponseTimeoutException has occured" + e.getMessage());
            }
            catch (InvalidResponseException e)
            {
                System.out.println("InvalidResponseException has occured" + e.getMessage());
            }
            catch (NegativeResponseException e)
            {
                System.out.println("NegativeResponseException has occured" + e.getMessage());
            }
            catch (IOException e)
            {
                System.out.println("IOException has occured" + e.getMessage());
            }
        }
        session.unbindAndClose();
        return msgId;
    }

    /** Returns a freshly bound session (no pooling). */
    private SMPPSession getSession() throws Exception
    {
        return newSession();
    }

    /** Opens a transmitter bind to the SMSC. Credentials are placeholders. */
    private SMPPSession newSession() throws Exception
    {
        BindParameter bindParam = new BindParameter(BindType.BIND_TX, "<user_name>", "<pass_word>", "tdd",
            TypeOfNumber.UNKNOWN, NumberingPlanIndicator.UNKNOWN, null);
        return new SMPPSession("17.1.1.1", 6666, bindParam);
    }

    /**
     * Number of segments needed at MAX_SEGMENT_CHARS characters per segment.
     *
     * @return segment count, always at least 1
     */
    public int getTotalSegmentsForTextMessage(String message)
    {
        int splitPos = MAX_SEGMENT_CHARS;
        int totalsegments = 1;
        if (message.length() > splitPos)
        {
            totalsegments = (message.length() / splitPos) + ((message.length() % splitPos > 0) ? 1 : 0);
        }
        return totalsegments;
    }

    /**
     * Splits {@code msg} into {@code totalSegments} pieces of at most
     * {@code pos} characters each.
     *
     * Fix: the original left the single element null when totalSegments == 1,
     * causing an NPE when that one segment was encoded.
     */
    public String[] splitIntoStringArray(String msg, int pos, int totalSegments)
    {
        String[] segmentData = new String[totalSegments];
        if (totalSegments == 1)
        {
            segmentData[0] = msg;
            return segmentData;
        }
        int startIndex = 0;
        for (int i = 0; i < totalSegments; i++)
        {
            int endIndex = Math.min(startIndex + pos, msg.length());
            segmentData[i] = msg.substring(startIndex, endIndex);
            startIndex = endIndex;
        }
        return segmentData;
    }

    public static void main(String[] args) throws Exception
    {
        SendLongSMSMessage slSMS = new SendLongSMSMessage();
        String message = "Tech Dive heralds the arrival of a community of Developers "
            + "who share, collaborate and exchange ideas, concepts, technical know-how. "
            + "This forum lets you take a deep dive in technical topics that are hot and happening as well as on legacy systems."
            + "The idea of the forum is to ensure collaboration amongst developers through exchange of ideas/concepts "
            + "so their technical skills are enhanced."
            + "We plan to bring in experienced professionals on board so content/blog written is authentic and precise."
            + "Come, join us and be a part of new way of collaboration!";
        String MSISDN = "9500000000";
        String senderAddr = "8500000000";
        slSMS.submitLongSMS(MSISDN, senderAddr, message);
    }
}
The best source to solve these kinds of problems is to use SMPP official documentation:
https://smpp.org/SMPP_v3_4_Issue1_2.pdf
To send SubmitSm with long messages, you need to use optional_parameter called message_payload instead of common short_message parameter.
You can read this information in documentation too:
The maximum message length which can be specified in sm_length field
is 254 octets. If an ESME wishes to submit a message of length greater
than 254 octets, the sm_length field must be set to NULL and the
message_payload optional parameter must be populated with the message
length value and user data.
To solve your problem, you need to check each time you are sending a message, how many bytes are in it, and if it is more than 254, add message_payload as your optional_parameter instead of short_message.
With cloudhopper library you can do it like this :
if (length > 254) {
submitSm.setOptionalParameter(new Tlv(
SmppConstants.TAG_MESSAGE_PAYLOAD,
CharsetUtil.encode(messageBody, CharsetUtil.CHARSET_UCS_2),
"message_payload"));
} else {
submitSm.setShortMessage(CharsetUtil.encode(messageBody, CharsetUtil.CHARSET_UCS_2));
}
so I'm trying to see what's the fastest way to write to Chronicle Queue in a multithread env so I have the following:
public static void main(String[] args) throws Exception{
final String path = args[0];
int times = Integer.parseInt(args[1]);
int num = Integer.parseInt(args[2]);
AtomicInteger nextid = new AtomicInteger(0);
ThreadLocal<Integer> id = ThreadLocal.withInitial(() -> nextid.getAndIncrement());
ChronicleTest test = new ChronicleTest();
ChronicleWriter writer = test.new ChronicleWriter(path);
CountDownLatch start = new CountDownLatch(1);
CountDownLatch done = new CountDownLatch(num);
Thread[] threads = new Thread[num];
long[] samples = new long[times * num];
for (int i = 0; i < num; i ++) {
threads[i] = new Thread(new Runnable() {
#Override
public void run() {
try {
start.await();
for (int i = 0; i < times; i++) {
int j = i + times*id.get().intValue();
long s = System.nanoTime();
writer.write(j + " DGirr5JgGVmxhvmaoO0c5MVVOUIEJxWa6nVStPnqmRl3T4hKE9tiwNjn6322uhgr2Fs4hDG8aKYvIB4O0733fx18EqGqNsshaSKoouky5ZekGK3vO87nfSUOz6uDD0olOp35QJQKPgr7tNlFgQP7BImcCyMPFCCm3yhSvOUgiVAD9W9BC3cqlKjQebOG4EkqzRIzwZjxbnIeK2YttfrvOvUJs0e9WBhXUVibi5Ks2j9ROQu2q0PJII4NYyN1a5YW2UKxyng3bRrtBVFqMSamtFzJ23EE4Y7rbQyeCVJhIKRM1LRvcGLUYZqKICWwDtOjGcbXUIlLLYiJcnVRZ4gNRvbFXvTL4XDjhD3uP5S8DjnkeAIBZcQ4VEUf30x65pTGLhWjOMV6jtiEQOWKB3nsuPMhcS1lP3wTQztViW7T8IsQlA5kvVAsnT5A7pojS1CffcYz4a2Rwqf9w6mhTPPZXgpDGArtThW3a69rwjsQxEY9aTwi0Pu0jlSAMmvOA158QFsSeJvLoJXILfocgjNEkj7iVcO0Rc6eC6b5EhJU3wv80EEHnORMXpQVuAuPyao7vJsx06TMcXf9t7Py4qxplVPhptIzrKs2qke2t5A8O4LQzq19OfEQsQGEjqHSbnfWXjfuntuFR8rV4VMyLZO1z3K7HmHtCEy14p5u0C0lj7vmtCnrOum0bnG2MwaAR7DJPIpOtwRObli5K5grv54AWnJyagpRX5e3eTEA8NAKO8oDZuLaoCvgavv9ciFrJcIDmyleVweiVrHs1fQXJoELzFpH4BmvzBuUjfZ8ORSIZsVuq4Hpls19GIA8opb1mSBt7MTifLPauo8WDWRoNi9DvjL4Z08Je6DvrhAFXasU2CMugg5EZ06OXexU17qnvxx2Vz9s9E5U50jDemySZ78KcZ6nqhQKIXUrkTktoEan2JXxI2NNSqEYifwly8ZO2MDquPe4J11rAcOqYp9y6Kb4NtFpNysM1evrLPvCx8oe");
long e = System.nanoTime();
samples[j] = e - s;
}
done.countDown();
} catch (Exception e){
e.printStackTrace();
}
}
});
}
for (int i = 0; i < num; i ++) {
try {
threads[i].start();
} catch (Exception e){
}
}
long startT = System.currentTimeMillis();
start.countDown();
done.await();
long endT = System.currentTimeMillis();
System.out.println("Time to complete [" + times + "] iteration in [" + (endT - startT) + " ms] and threads [" + num + "]");
System.out.println("#######");
for (int i = 0; i < times * num; i ++){
System.out.println(samples[i]);
}
}
/**
 * Owns one binary Chronicle queue and hands each thread its own appender,
 * acquired lazily on that thread's first write.
 */
private class ChronicleWriter {
    SingleChronicleQueue m_cqueue;
    ThreadLocal<ExcerptAppender> m_appender;

    ChronicleWriter(String path) {
        m_cqueue = SingleChronicleQueueBuilder.binary(path).build();
        // Per-thread appender, created on first use by each thread.
        m_appender = ThreadLocal.withInitial(() -> m_cqueue.acquireAppender());
    }

    /** Appends one text message using the calling thread's appender. */
    void write(String msg) {
        m_appender.get().writeText(msg);
    }
}
And I ran with parameters:
path 2500 40
For some reason, this keeps crashing with core dump. What am I doing wrong? My disk has lots of disk space so that shouldn't matter. Thanks!!
If your program is crashing due to OutOfMemory error then
note that the disk space and the actual space used by the program may differ.
You may need to increase jvm heap size.
Refer below link to increase jvm heap size
What are the Xms and Xmx parameters when starting JVMs?
Or
Refer below link if you are running your program through eclipse
http://www.planetofbits.com/eclipse/increase-jvm-heap-size-in-eclipse/
I have tried your program with following version of chronicle-queue and it works fine.
<dependency>
<groupId>net.openhft</groupId>
<artifactId>chronicle-queue</artifactId>
<version>4.5.14</version>
</dependency>
I am using Rabbitmq 3.7.3 with Java client 5.1.2 [amqp-client-5.1.2.jar] for priority queue. In my usecase I will be having maximum of 60 priorities in a single non persistence queue where only few upto 10-15 will be mostly used.
Case 1. If I have defined a queue to have 10 priorities, and messages ranging from 0 to 9 priorities are pushed to the queue, I am getting 12500 writes per second.
Case 2. If I have defined a queue to have 60 priorities, and messages ranging from 0-9 priorities are pushed to the queue, I am getting 4200 writes per second.
Case 3: If I have defined a queue to have 250 priorities, and messages ranging from 0-9 priorities are pushed to the queue, I am getting only 1500 writes per second.
What observed here is, as and when we increase the priorities capacity of a queue, though only very few being used, the write performance degrades.
Below is the code snippet: [Writes are done using single thread]
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.AMQP.BasicProperties;
import java.io.*;
import java.util.*;
public class Write implements Runnable{
int start;
int count;
int end;
private long unixTime;
private long timeTaken;
private long totalTimeTaken;
private long totalRequests;
private long minLatency;
private long maxLatency;
private double avgLatency;
private FileOutputStream out;
int parity = 1;
int ndnc = 0;
int mnp = 1001;
int blist = 0;
String value;
ConnectionFactory factory;
Connection connection;
Channel channel;
String QUEUE_NAME;
public Write(int s, int c){
this.start = s;
this.count = c;
this.end = this.count;
this.totalTimeTaken = 0;
this.totalRequests = 0;
this.minLatency = 1000000;
this.maxLatency = 0;
try{
this.QUEUE_NAME = "queuedata_4";
this.factory = new ConnectionFactory();
factory.setHost("192.168.1.100");
factory.setUsername("user");
factory.setPassword("pass");
this.connection = factory.newConnection();
this.channel = this.connection.createChannel();
Map<String, Object> args = new HashMap<String, Object>();
args.put("x-max-priority", 60);
this.channel.queueDeclare(QUEUE_NAME, false, false, false, args);
}catch(Exception e){
System.out.println("Create Exception"+e);
}
}
public void run(){
String message;
byte[] data = null;
for(int k=this.start; k<=(this.end); k++){
message = "Message_" + k;
unixTime = System.nanoTime();
try{
this.channel.basicPublish(
"",
this.QUEUE_NAME,
new BasicProperties.Builder()
.deliveryMode(1)
.priority(k%10+1)
.build(),
message.getBytes("UTF-8")
);
}catch(Exception e){
System.out.println("New connection made"+e);
}
timeTaken = System.nanoTime() - unixTime;
totalTimeTaken += timeTaken;
if(timeTaken < minLatency){
minLatency = timeTaken;
}
if(timeTaken > maxLatency){
maxLatency = timeTaken;
}
totalRequests ++;
}
avgLatency = totalTimeTaken / totalRequests;
System.out.println("TotalReqs:" + totalRequests + "
TotalTime:" + ((float)totalTimeTaken/1000000.0) + "
MinLatency:" + ((float)minLatency/1000000.0) + " MaxLatency:"
+ ((float)maxLatency/1000000.0) + " AvgLatency:" +
((float)avgLatency/1000000.0));
try{
channel.close();
connection.close();
}catch(Exception e){
System.out.println("Close Exception");
}
}
}
I'm trying to learn Redis through Redisson. Here is my code to insert into Redis using multiple threads.
package redisson
import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;
import org.redisson.Redisson;
import org.redisson.api.RBatch;
import org.redisson.api.RMap;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
/**
 * Multi-threaded Redisson benchmark: worker threads batch fastPut operations
 * into the "Daily"/"Weekly"/"Monthly" Redis hashes and execute the whole
 * batch once at the end of each thread's run().
 */
public class RedisTest extends Thread {
// NOTE(review): these fields are static, yet the instance constructor below
// reassigns them through "this." -- every thread shares (and overwrites)
// the same maps and client.
static RMap<String, String> dMap = null;
static RMap<String, String> wMap = null;
static RMap<String, String> mMap = null;
static RedissonClient redisson = null;
// Loads the Redisson config from a hard-coded path, runs the benchmark,
// then shuts the client down.
public static void main(String[] args) throws Exception {
Config config = Config.fromJSON(new File("C:\\Users\\neon-workspace\\RedisProject\\src\\main\\resources\\SingleNodeConfig.json"));
// This local shadows the static field of the same name; the static one is
// only assigned later, inside the RedisTest constructor.
RedissonClient redisson = Redisson.create(config);
dMap = redisson.getMap("Daily");
wMap = redisson.getMap("Weekly");
mMap = redisson.getMap("Monthly");
connectHbse(dMap,wMap,mMap,redisson);
redisson.shutdown();
}
// Spawns totalThread workers, waits for them, and prints the wall-clock time.
public static void connectHbse(RMap<String, String> dMap,RMap<String, String> wMap,RMap<String, String> mMap,RedissonClient redisson) {
int totalSize=500000;
int totalThread=2;
int chunkSize = totalSize/totalThread;
// NOTE(review): thread i receives a counter snapshot of (i+1)*chunkSize and
// each thread counts its own copy down to 0 (see the constructor and run()),
// so the threads repeat each other's key ranges and the grand total of
// operations exceeds totalSize. Confirm whether disjoint ranges were intended.
AtomicInteger total = new AtomicInteger(chunkSize);
RedisTest test1[] = new RedisTest[totalThread];
for (int i = 0; i < test1.length; i++) {
test1[i] = new RedisTest(total,dMap,wMap,mMap,redisson);
total.set(total.intValue()+chunkSize);
}
long t1 = System.currentTimeMillis();
for (int i = 0; i < test1.length; i++) {
test1[i].start();
}
try {
for (int i = 0; i < test1.length; i++) {
test1[i].join();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
System.out.println("Final Total Time Taken ::>>>>>>>>>>>>>>>>> " + ((System.currentTimeMillis() - t1))+"ms");
}
// Per-thread countdown of remaining operations (a copy, not shared -- see above).
private AtomicInteger total = null;
// Copies the current counter value into a private AtomicInteger and
// (re)assigns the shared static fields.
public RedisTest(AtomicInteger total,RMap<String, String> dMap,RMap<String, String> wMap,RMap<String, String> mMap,RedissonClient redisson) {
this.total = new AtomicInteger(total.intValue());
this.dMap = dMap;
this.wMap = wMap;
this.mMap = mMap;
this.redisson = redisson;
}
// Uniform-ish random int in [minimum, maximum). Note the (maximum, minimum)
// argument order.
public static int getRandomInteger(int maximum, int minimum) {
return ((int) (Math.random() * (maximum - minimum))) + minimum;
}
// Clears the maps, queues all fastPutAsync commands into ONE batch, then
// executes it.
public void run() {
try {
long t1 = System.currentTimeMillis();
// NOTE(review): every thread clears all three maps, racing with the other
// threads' inserts.
dMap.clear();
wMap.clear();
mMap.clear();
RBatch batch = redisson.createBatch();
for (;total.decrementAndGet()>=0;) {
String dvalue = ""+getRandomInteger(100,200);
String wvalue = "" +getRandomInteger(200, 300);
String mvalue = "" +getRandomInteger(300, 400);
// NOTE(review): all commands are buffered client-side until the single
// batch.execute() below; at totalSize=500000 (x3 maps) this buffering is
// what exhausts netty's direct memory (OutOfDirectMemoryError). Executing
// the batch in fixed-size chunks would bound the memory use.
batch.getMap("Daily").fastPutAsync(""+total.get(), dvalue);
batch.getMap("Weekly").fastPutAsync(""+total.get(), wvalue);
batch.getMap("Monthly").fastPutAsync(""+total.get(), mvalue);
// NOTE(review): synchronizing on the per-thread "total" coordinates nothing
// across threads; it only serves this thread's own progress printout.
synchronized (total) {
if(total.get()%100==0)
System.out.println(total.get()+" Records in Seconds:::::" + ((System.currentTimeMillis() - t1))/1000);
}
}
batch.execute();
System.out.println("Time Taken for completion::::: " + ((System.currentTimeMillis() - t1))+" by thread:::::"+Thread.currentThread().getName());
System.out.println("Done !!!");
} catch (Exception e) {
System.out.println("Done !!!" + e.getMessage());
e.printStackTrace();
} finally {
}
}
}
This code works fine until totalSize=400000.
When i put the totalSize=500000, its throwing the following exception.
io.netty.handler.codec.EncoderException: io.netty.util.internal.OutOfDirectMemoryError: failed to allocate 16777216 byte(s) of direct memory (used: 939524096, max: 954466304)
at io.netty.handler.codec.MessageToByteEncoder.write(MessageToByteEncoder.java:125)
at org.redisson.client.handler.CommandBatchEncoder.write(CommandBatchEncoder.java:45)
at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:738)
... 25 more
Caused by: io.netty.util.internal.OutOfDirectMemoryError: failed to allocate 16777216 byte(s) of direct memory (used: 939524096, max: 954466304)
at io.netty.util.internal.PlatformDependent.incrementMemoryCounter(PlatformDependent.java:627)
at io.netty.util.internal.PlatformDependent.allocateDirectNoCleaner(PlatformDependent.java:581)
at io.netty.buffer.PoolArena$DirectArena.allocateDirect(PoolArena.java:764)
at io.netty.buffer.PoolArena$DirectArena.newChunk(PoolArena.java:740)
at io.netty.buffer.PoolArena.allocateNormal(PoolArena.java:244)
at io.netty.buffer.PoolArena.allocate(PoolArena.java:226)
at io.netty.buffer.PoolArena.reallocate(PoolArena.java:397)
at io.netty.buffer.PooledByteBuf.capacity(PooledByteBuf.java:118)
at io.netty.buffer.AbstractByteBuf.ensureWritable0(AbstractByteBuf.java:285)
at io.netty.buffer.AbstractByteBuf.ensureWritable(AbstractByteBuf.java:265)
at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1046)
at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1054)
at org.redisson.client.handler.CommandEncoder.writeArgument(CommandEncoder.java:169)
at org.redisson.client.handler.CommandEncoder.encode(CommandEncoder.java:110)
at org.redisson.client.handler.CommandBatchEncoder.encode(CommandBatchEncoder.java:52)
at org.redisson.client.handler.CommandBatchEncoder.encode(CommandBatchEncoder.java:32)
at io.netty.handler.codec.MessageToByteEncoder.write(MessageToByteEncoder.java:107)
... 27 more
But i have about 7Gb ram free.
Can someone explain to me the reason i'm getting this exception?
It seems i should provide more memory to my JVM instance using -Xmx which solved the issue for me.
I have a GPS tracker with TK06a Chipset, and I have my own tcp listener, everything is working fine, I have received the data from the device with this format :
#355488020131775##1#0000#AUT#01#52500100232a47#10341.175280,E,121.322800,N,0.28,0.00#111113#171607.000##
I think i figured out what are these, (for example the first one is the IMEI), but I didn't know how to convert (10341.175280,E) and (121.322800,N) to something that google maps can understand.
beside the device has a poor user manual and no documentation for the protocol.
the real location should be in here (1.355269,103.686426) maybe this can lead you to solve this mystery :)
Thanks in advance.
Edit:
I Found this on the web, maybe some will find it useful :
The decode of the line above.
the IMEI number cannot be empty, if the SIM card number regarded as device series number, then the data of IMEI part should be filled in
SIM cad number.
SIM card number: this part can be empty , or also can be same as 1st point , fill in SIM card number.
0 or 1 , reserve (original meaning is ACC status )
Device password ( 0-9 numbers, digit cannot over 6 digits, generally is in 4 digits )
Reserved word AUT, cannot be changed .
Numbers of data, 00-99 , in 2 digits.
The format of Each data as below:
#base station number#Longitude, East and West identification, latitude,North and South identification,speed(nm), direction angle(0-360)#date#time
Base station number can be empty.
Longitude, format : dddff.ffff, the degree part must be in 3 integer, the minute part must be in 2 integer, the decimal part is in
4 digits, there is no separator between degree and minute.
East and West identification, only one character , E/W.
Latitude, format : ddff.ffff, same as Longitude , only the degree part is in 2 integer.
North and South identification, only one character , N/S.
Speed: can be 0.
Direction : can be 0.
Date, format : ddmmyy.
Time, format: hhnnss.mmm, the part before decimal point should be hour, minute and second in turn, each is in 2 digits, the part after
decimal point should be milliseconds, it can be 000.
This format is DM like in NMEA RMC message, but with a missing leading 0:
given longitude: 10341.175280 E
The first 3 digits are degrees: 103
Then the rest is minutes: 41.175280
This is now format "DM" (degrees and decimal minutes).
Google uses "DEG" (Decimal degrees)
convert: 103 + 41.175280 / 60.0 = 103.686254
(DEG = degrees + minutes / 60.0)
which fits your location perfectly.
Now it is a bit strange:
It should read "0121.322800" not "121.322800"
But then similar to above but since latitude is limited to two digits:
The first 2 digits are always degrees: 01
Then the rest is minutes: 21.322800
same formula as above: lat = 1 + 21.322800 / 60.0 = 1.35538
finally: if W or S, multiply the deg value with -1
(In your case it is N and E, so it stays as it is - positive)
This format looks partly like the NMEA RMC sentence
I think you want to make this work with OpenGTS.
So here is what I did to make it work (note: I don't need the tk10x devices, so I overwrote the files; you can create another class if you want):
go to $GTS_HOME/src/org/opengts/servers/tk10x
and change the TrackServer.java with this code
I wrote a new parseInsert function.
package org.opengts.servers.tk10x;
import java.lang.*;
import java.util.*;
import java.io.*;
import java.net.*;
import java.sql.*;
import org.opengts.util.*;
import org.opengts.db.*;
import org.opengts.db.tables.*;
public class TrackServer
{
// ------------------------------------------------------------------------
// initialize runtime configuration

/**
 * Copies the TCP/UDP idle/packet/session timeout values from the registered
 * device-communication-server configuration into this server's settings.
 * Does nothing when no configuration is registered (defaults remain).
 */
public static void configInit()
{
    DCServerConfig dcs = Main.getServerConfig();
    if (dcs == null) {
        return; // keep the compile-time Constants.* defaults
    }
    setTcpIdleTimeout(   dcs.getTcpIdleTimeoutMS(   Constants.TIMEOUT_TCP_IDLE   ));
    setTcpPacketTimeout( dcs.getTcpPacketTimeoutMS( Constants.TIMEOUT_TCP_PACKET ));
    setTcpSessionTimeout(dcs.getTcpSessionTimeoutMS(Constants.TIMEOUT_TCP_SESSION));
    setUdpIdleTimeout(   dcs.getUdpIdleTimeoutMS(   Constants.TIMEOUT_UDP_IDLE   ));
    setUdpPacketTimeout( dcs.getUdpPacketTimeoutMS( Constants.TIMEOUT_UDP_PACKET ));
    setUdpSessionTimeout(dcs.getUdpSessionTimeoutMS(Constants.TIMEOUT_UDP_SESSION));
}
// ------------------------------------------------------------------------
// Start TrackServer (TrackServer is a singleton)

/** Singleton instance; published only via the synchronized starter below. */
private static TrackServer trackServerInstance = null;

/**
 * Starts the TrackServer singleton on the given ports, or returns the
 * existing instance if one was already started. Synchronized because the
 * original check-then-act was not thread-safe: two racing callers could
 * each construct a server on the same ports.
 */
public static synchronized TrackServer startTrackServer(int tcpPorts[], int udpPorts[], int commandPort)
    throws Throwable
{
    if (trackServerInstance == null) {
        trackServerInstance = new TrackServer(tcpPorts, udpPorts, commandPort);
    } else {
        //Print.logError("TrackServer already initialized!");
    }
    return trackServerInstance;
}

/** Returns the singleton, or null if startTrackServer has not been called. */
public static TrackServer getTrackServer()
{
    return trackServerInstance;
}
// ------------------------------------------------------------------------
// TCP Session timeouts
/* idle timeout */
private static long tcpTimeout_idle = Constants.TIMEOUT_TCP_IDLE;
public static void setTcpIdleTimeout(long timeout)
{
TrackServer.tcpTimeout_idle = timeout;
}
public static long getTcpIdleTimeout()
{
return TrackServer.tcpTimeout_idle;
}
/* inter-packet timeout */
private static long tcpTimeout_packet = Constants.TIMEOUT_TCP_PACKET;
public static void setTcpPacketTimeout(long timeout)
{
TrackServer.tcpTimeout_packet = timeout;
}
public static long getTcpPacketTimeout()
{
return TrackServer.tcpTimeout_packet;
}
/* total session timeout */
private static long tcpTimeout_session = Constants.TIMEOUT_TCP_SESSION;
public static void setTcpSessionTimeout(long timeout)
{
TrackServer.tcpTimeout_session = timeout;
}
public static long getTcpSessionTimeout()
{
return TrackServer.tcpTimeout_session;
}
// ------------------------------------------------------------------------
// UDP Session timeouts
/* idle timeout */
private static long udpTimeout_idle = Constants.TIMEOUT_UDP_IDLE;
public static void setUdpIdleTimeout(long timeout)
{
TrackServer.udpTimeout_idle = timeout;
}
public static long getUdpIdleTimeout()
{
return TrackServer.udpTimeout_idle;
}
/* inter-packet timeout */
private static long udpTimeout_packet = Constants.TIMEOUT_UDP_PACKET;
public static void setUdpPacketTimeout(long timeout)
{
TrackServer.udpTimeout_packet = timeout;
}
public static long getUdpPacketTimeout()
{
return TrackServer.udpTimeout_packet;
}
/* total session timeout */
private static long udpTimeout_session = Constants.TIMEOUT_UDP_SESSION;
public static void setUdpSessionTimeout(long timeout)
{
TrackServer.udpTimeout_session = timeout;
}
public static long getUdpSessionTimeout()
{
return TrackServer.udpTimeout_session;
}
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// TCP port listener threads
private java.util.List<ServerSocketThread> tcpThread = new Vector<ServerSocketThread>();
// UDP port listener threads
private java.util.List<ServerSocketThread> udpThread = new Vector<ServerSocketThread>();
// Command port listener thread
private ServerSocketThread cmdThread = null;
private DatagramSocket udpSocket = null;
// ------------------------------------------------------------------------
/* private constructor */
private TrackServer(int tcpPorts[], int udpPorts[], int commandPort)
throws Throwable
{
int listeners = 0;
// Start TCP listeners
if (!ListTools.isEmpty(tcpPorts)) {
for (int i = 0; i < tcpPorts.length; i++) {
int port = tcpPorts[i];
if (ServerSocketThread.isValidPort(port)) {
try {
this._startTCP(port);
listeners++;
} catch (java.net.BindException be) {
Print.logError("TCP: Error binding to port: %d", port);
}
} else {
throw new Exception("TCP: Invalid port number: " + port);
}
}
}
// Start UDP listeners
if (!ListTools.isEmpty(udpPorts)) {
for (int i = 0; i < udpPorts.length; i++) {
int port = udpPorts[i];
if (ServerSocketThread.isValidPort(port)) {
try {
ServerSocketThread sst = this._startUDP(port);
if (this.udpSocket == null) {
this.udpSocket = sst.getDatagramSocket();
}
listeners++;
} catch (java.net.BindException be) {
Print.logError("UDP: Error binding to port: %d", port);
}
} else {
throw new Exception("UDP: Invalid port number: " + port);
}
}
}
/* do we have any active listeners? */
if (listeners <= 0) {
Print.logWarn("No active device communication listeners!");
}
}
// ------------------------------------------------------------------------
/* start TCP listener */
private void _startTCP(int port)
throws Throwable
{
ServerSocketThread sst = null;
/* create server socket */
try {
sst = new ServerSocketThread(port);
} catch (Throwable t) { // trap any server exception
Print.logException("ServerSocket error", t);
throw t;
}
/* initialize */
sst.setTextPackets(Constants.ASCII_PACKETS);
sst.setBackspaceChar(null); // no backspaces allowed
sst.setLineTerminatorChar(Constants.ASCII_LINE_TERMINATOR);
sst.setIgnoreChar(Constants.ASCII_IGNORE_CHARS);
sst.setMaximumPacketLength(Constants.MAX_PACKET_LENGTH);
sst.setMinimumPacketLength(Constants.MIN_PACKET_LENGTH);
sst.setIdleTimeout(TrackServer.tcpTimeout_idle); // time between packets
sst.setPacketTimeout(TrackServer.tcpTimeout_packet); // time from start of packet to packet completion
sst.setSessionTimeout(TrackServer.tcpTimeout_session); // time for entire session
sst.setTerminateOnTimeout(Constants.TERMINATE_ON_TIMEOUT);
sst.setClientPacketHandlerClass(TrackClientPacketHandler.class);
sst.setLingerTimeoutSec(Constants.LINGER_ON_CLOSE_SEC);
/* start thread */
Print.logInfo("Starting TCP listener thread on port " + port + " [timeout=" + sst.getSessionTimeout() + "ms] ...");
sst.start();
this.tcpThread.add(sst);
}
// ------------------------------------------------------------------------
/* start UDP listener */
private ServerSocketThread _startUDP(int port)
throws Throwable
{
ServerSocketThread sst = null;
/* create server socket */
try {
sst = new ServerSocketThread(ServerSocketThread.createDatagramSocket(port));
} catch (Throwable t) { // trap any server exception
Print.logException("ServerSocket error", t);
throw t;
}
/* initialize */
sst.setTextPackets(Constants.ASCII_PACKETS);
sst.setBackspaceChar(null); // no backspaces allowed
sst.setLineTerminatorChar(Constants.ASCII_LINE_TERMINATOR);
sst.setIgnoreChar(Constants.ASCII_IGNORE_CHARS);
sst.setMaximumPacketLength(Constants.MAX_PACKET_LENGTH);
sst.setMinimumPacketLength(Constants.MIN_PACKET_LENGTH);
sst.setIdleTimeout(TrackServer.udpTimeout_idle);
sst.setPacketTimeout(TrackServer.udpTimeout_packet);
sst.setSessionTimeout(TrackServer.udpTimeout_session);
sst.setTerminateOnTimeout(Constants.TERMINATE_ON_TIMEOUT);
sst.setClientPacketHandlerClass(TrackClientPacketHandler.class);
/* start thread */
Print.logInfo("Starting UDP listener thread on port " + port + " [timeout=" + sst.getSessionTimeout() + "ms] ...");
sst.start();
this.udpThread.add(sst);
return sst;
}
public DatagramSocket getUdpDatagramSocket()
{
return this.udpSocket;
}
// ------------------------------------------------------------------------
}`
and in Constants.java, find the 'ASCII_LINE_TERMINATOR[]' constant declaration and add '000' to it:
// Characters treated as end-of-packet markers for ASCII (text) packets.
// NOTE(review): the appended '000' is an octal literal equal to zero, so it
// duplicates the 0x00 and '\0' entries already present -- harmless, but
// redundant; confirm it is actually required for the tk10x devices.
public static final int ASCII_LINE_TERMINATOR[] = new int[] {
// this list has been constructed by observation of various data packets
0x00, 0xFF, 0xCE, '\0', '\n', '\r', ')', ';',000
};
after this
cd $GTS_HOME
ant tk10x
bin/runserver.sh -s tk10x
This should do the trick
And here is a link to the package I created:
https://anonfiles.com/file/0aae22ccb3822618fb693cd667283b18