RabbitMQ won't start after power failure - crash
I'm running a RabbitMQ server instance on my machine, but after a power failure in the building I can't get Rabbit to start.
No other service is running on the same TCP port, and I've tried reinstalling both RabbitMQ and Erlang, but nothing seems to work.
I'm running:
CentOS release 5.4 (Final)
Rabbit rabbitmq-server-2.8.2-1.noarch
erlang-R14B-03.3.el5.x86_64
The startup_error log is empty but the startup_log contains:
{error_logger,{{2012,5,10},{9,35,43}},"Protocol: ~p: register error: ~p~n",["inet_tcp",{{badmatch,{error,econnrefused}},[{inet_tcp_dist,listen,1},{net_kernel,start_protos,4},{net_kernel,start_protos,3},{net_kernel,init_node,2},{net_kernel,init,1},{gen_server,init_it,6},{proc_lib,init_p_do_apply,3}]}]}
{error_logger,{{2012,5,10},{9,35,43}},crash_report,[[{initial_call,{net_kernel,init,['Argument__1']}},{pid,<0.20.0>},{registered_name,[]},{error_info,{exit,{error,badarg},[{gen_server,init_it,6},{proc_lib,init_p_do_apply,3}]}},{ancestors,[net_sup,kernel_sup,<0.9.0>]},{messages,[]},{links,[#Port<0.90>,<0.17.0>]},{dictionary,[{longnames,false}]},{trap_exit,true},{status,running},{heap_size,610},{stack_size,24},{reductions,509}],[]]}
{error_logger,{{2012,5,10},{9,35,43}},supervisor_report,[{supervisor,{local,net_sup}},{errorContext,start_error},{reason,{'EXIT',nodistribution}},{offender,[{pid,undefined},{name,net_kernel},{mfargs,{net_kernel,start_link,[[rabbitmqprelaunch4868,shortnames]]}},{restart_type,permanent},{shutdown,2000},{child_type,worker}]}]}
{error_logger,{{2012,5,10},{9,35,43}},supervisor_report,[{supervisor,{local,kernel_sup}},{errorContext,start_error},{reason,shutdown},{offender,[{pid,undefined},{name,net_sup},{mfargs,{erl_distribution,start_link,[]}},{restart_type,permanent},{shutdown,infinity},{child_type,supervisor}]}]}
{error_logger,{{2012,5,10},{9,35,43}},std_info,[{application,kernel},{exited,{shutdown,{kernel,start,[normal,[]]}}},{type,permanent}]}
{"Kernel pid terminated",application_controller,"{application_start_failure,kernel,{shutdown,{kernel,start,[normal,[]]}}}"}
Anyone got an idea?
The econnrefused near the top clearly indicates that something is listening on the same port; netstat -plten could have shown what it was.
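A hedged sketch of the diagnostics (this assumes the stock ports — 4369 for epmd, the Erlang port mapper, and 5672 for AMQP — adjust for your setup):

# show which processes hold the Erlang/RabbitMQ ports
netstat -plten | grep -E ':4369|:5672'
# ask epmd which node names it has registered
epmd -names
# if a stale epmd survived the power failure, stopping it before retrying can help
epmd -kill

A power failure can leave a stale epmd process or a leftover lock behind, so checking these before reinstalling anything is worth a try.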
Related
MongoDB Lookup values based on dynamic field name
I'm pretty sure the below can be done, I'm struggling to understand how to do it in MongoDB. My data is structured like this (demo data):

db={
  "recipes": [
    { "id": 1, "name": "flatbread pizza", "ingredients": { "1010": 1, "1020": 2, "1030": 200 } },
    { "id": 2, "name": "cheese sandwich", "ingredients": { "1040": 1, "1050": 2 } }
  ],
  "ingredients": [
    { "id": 1010, "name": "flatbread", "unit": "pieces" },
    { "id": 1020, "name": "garlic", "unit": "clove" },
    { "id": 1030, "name": "tomato sauce", "unit": "ml" },
    { "id": 1040, "name": "bread", "unit": "slices" },
    { "id": 1050, "name": "cheese", "unit": "slices" }
  ]
}

The output I'm trying to achieve would look like this:

[
  { "id": 1, "name": "flatbread pizza", "flatbread": "1 pieces", "garlic": "2 cloves", "tomato sauce": "200 ml" },
  { "id": 2, "name": "cheese sandwich", "bread": "1 slices", "cheese": "2 slices" }
]

I've tried several approaches, and I get stuck at the bit where I need to do a lookup based on the ingredient name (which actually is the id). I tried using $objectToArray to turn it into a k-v document, but then I get stuck on how to construct the lookup pipeline.
This is not a simple solution, and probably can be improved:

db.recipes.aggregate([
  { "$addFields": { ingredientsParts: { "$objectToArray": "$ingredients" } } },
  { $unwind: "$ingredientsParts" },
  { "$group": {
      _id: "$id",
      name: { $first: "$name" },
      ingredientsParts: { $push: { v: "$ingredientsParts.v", id: { $toInt: "$ingredientsParts.k" } } }
  } },
  { "$lookup": {
      "from": "ingredients",
      "localField": "ingredientsParts.id",
      "foreignField": "id",
      "as": "ingredients"
  } },
  { $unwind: "$ingredients" },
  { "$addFields": {
      "ingredientsPart": {
        "$filter": {
          input: "$ingredientsParts",
          as: "item",
          cond: { $eq: [ "$$item.id", "$ingredients.id" ] }
        }
      }
  } },
  { $project: {
      ingredients: 1,
      ingredientsPart: { "$arrayElemAt": [ "$ingredientsPart", 0 ] },
      name: 1
  } },
  { "$addFields": {
      units: {
        k: "$ingredients.name",
        v: { "$concat": [ { $toString: "$ingredientsPart.v" }, " ", "$ingredients.unit" ] }
      }
  } },
  { $group: { _id: "$_id", name: { $first: "$name" }, units: { $push: "$units" } } },
  { "$addFields": { "data": { "$arrayToObject": "$units" } } },
  { "$addFields": { "data.id": "$_id", "data.name": "$name" } },
  { "$replaceRoot": { "newRoot": "$data" } }
])

You can see it works here. As rickhg12hs said, it can be modeled better.
Filter time range in Mongodb
I want to get all the documents that fall within a particular TIME in MongoDB. I have searched for this, but I can only find solutions for documents that fall within a particular DATE. For example, the SQL expression of what I am looking for would be:

WHERE YEAR(date_time) = 2019
  AND TIME(date_time) BETWEEN '07:30' AND '08:30'
Demo - https://mongoplayground.net/p/Ny5FCEiQkE7

Use $expr:

db.collection.aggregate([{
  $match: {
    $expr: { $eq: [ { $year: "$dt" }, 2021 ] }, // match year
    $or: [ // or query
      { $and: [ // match hour 7 and minutes 30+
          { $expr: { $eq: [ { "$hour": "$dt" }, 7 ] } },
          { $expr: { $gte: [ { "$minute": "$dt" }, 30 ] } }
      ] },
      { $and: [ // match hour 8 and minutes less than 30
          { $expr: { $eq: [ { "$hour": "$dt" }, 8 ] } },
          { $expr: { $lt: [ { "$minute": "$dt" }, 30 ] } }
      ] }
    ]
  }
}])
You can use this one. Note two fixes over the naive version: minutes-since-midnight is hour*60 + minute (multiplying by 24 makes different hours collide), and the $match bounds must be plain numbers, since aggregation operators are not evaluated inside an ordinary $match value — so 07:30 becomes 450 and 08:30 becomes 510:

db.collection.aggregate([
  {
    $addFields: {
      parts: {
        $dateToParts: {
          date: "$date_time" // , timezone: <timezone> if needed
        }
      }
    }
  },
  {
    $set: {
      // minutes since midnight: hour * 60 + minute
      "parts.time": { $sum: [ { $multiply: [ "$parts.hour", 60 ] }, "$parts.minute" ] }
    }
  },
  {
    $match: {
      "parts.year": 2019,
      "parts.time": { $gte: 450, $lte: 510 } // 7*60+30 .. 8*60+30
    }
  }
])

Another solution could be this one:

db.collection.aggregate([
  {
    $addFields: {
      time: {
        $dateFromParts: {
          year: { $year: "$date_time" },
          month: 1,
          day: 1,
          hour: { $hour: "$date_time" },
          minute: { $minute: "$date_time" }
          // , timezone: <timezone> if needed
        }
      }
    }
  },
  {
    $match: {
      time: {
        $gte: ISODate("2019-01-01T07:30:00Z"),
        $lte: ISODate("2019-01-01T08:30:00Z")
      }
    }
  }
])
'nonstandard' output on the Bitcoin blockchain
Can someone explain how a nonstandard output is spendable in another transaction? (Since it doesn't have an address for the bitcoins to come from.)

# bitcoin-cli getblock "00000000000005f1eb5bb5f6b50e6a9f47182335aa0c749b0e8a43b53daad5c4" 2
{
  ...
  "tx": [
    { ... },
    { ... },
    { ... },
    { ... },
    { ... },
    { ... },
    { ... },
    {
      "txid": "8ebe1df6ebf008f7ec42ccd022478c9afaec3ca0444322243b745aa2e317c272",
      ...
      "vin": [
        ...
        {
          "txid": "db3f14e43fecc80eb3e0827cecce85b3499654694d12272bf91b1b2b8c33b5cb",
          "vout": 2,
          "scriptSig": {
            "asm": "3045022100f4ece69a7c50c911b3af6fa017dcf22de4df66699cd85c5753634d85140b955602204996b677af3a0b5835b36ae1db6323a125f1525edd4727be3209a0535073f422[ALL] 0412b80271b9e034006fd944ae4cdbdbc45ee30595c1f8961439385575f1973019b3ff615afed85a75737ff0d43cd81df74bc76004b45a6e7c9e2d115f364da1d7",
            "hex": "483045022100f4ece69a7c50c911b3af6fa017dcf22de4df66699cd85c5753634d85140b955602204996b677af3a0b5835b36ae1db6323a125f1525edd4727be3209a0535073f42201410412b80271b9e034006fd944ae4cdbdbc45ee30595c1f8961439385575f1973019b3ff615afed85a75737ff0d43cd81df74bc76004b45a6e7c9e2d115f364da1d7"
          },
          "sequence": 4294967295
        },
        ...
      ]
    },
    { ... }
  ]
  ...
}
# bitcoin-cli getrawtransaction "db3f14e43fecc80eb3e0827cecce85b3499654694d12272bf91b1b2b8c33b5cb" 2
{
  "txid": "db3f14e43fecc80eb3e0827cecce85b3499654694d12272bf91b1b2b8c33b5cb",
  ...
  "vin": [ ... ],
  "vout": [
    { ... },
    { ... },
    {
      "value": 0.00065536,
      "n": 2,
      "scriptPubKey": {
        "asm": "OP_DUP OP_HASH160 6300bf4c5c2a724c280b893807afb976ec78a92b OP_EQUALVERIFY OP_CHECKSIG OP_NOP",
        "hex": "76a9146300bf4c5c2a724c280b893807afb976ec78a92b88ac61",
        "type": "nonstandard"
      }
    }
  ],
  ...
}
#
Addresses are just a convenient representation of common scripts, namely p2pkh, p2sh, p2wpkh, and p2wsh scripts. The script in your example is simply a p2pkh script with an additional OP_NOP. As an OP_NOP is a... nop, it does not change the script in this case at all. You can spend it the same way as a regular p2pkh output.
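For what it's worth, decoding the scriptPubKey hex from the question byte by byte shows the p2pkh pattern plus the trailing no-op:

76                                            OP_DUP
a9                                            OP_HASH160
14 6300bf4c5c2a724c280b893807afb976ec78a92b   push 20 bytes (the pubkey hash)
88                                            OP_EQUALVERIFY
ac                                            OP_CHECKSIG
61                                            OP_NOP

Drop the final 0x61 and the remaining bytes are exactly the standard p2pkh template, which is why the usual signature-plus-pubkey scriptSig shown in the question spends it.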
Create a new Google Sheet with row or column groups
I'm trying to create a new spreadsheet using spreadsheets#create, with specified row groups. In the API Explorer, I am entering the JSON below, which should produce a sheet where the Top2A/Top2B/Top2C rows are grouped under Top2. No errors are flagged or returned when I execute the call, but when the sheet is created, the specified grouping is not created - only the values are set.

{
  "properties": { "title": "Test Spreadsheet", "locale": "en" },
  "sheets": [
    {
      "properties": { "title": "Test1" },
      "data": [
        {
          "startRow": 0,
          "startColumn": 0,
          "rowData": [
            { "values": [ { "userEnteredValue": { "stringValue": "Top1" } } ] },
            { "values": [ { "userEnteredValue": { "stringValue": "Top2" } } ] },
            { "values": [ { "userEnteredValue": { "stringValue": "" } }, { "userEnteredValue": { "stringValue": "Top2A" } } ] },
            { "values": [ { "userEnteredValue": { "stringValue": "" } }, { "userEnteredValue": { "stringValue": "Top2B" } } ] },
            { "values": [ { "userEnteredValue": { "stringValue": "" } }, { "userEnteredValue": { "stringValue": "Top2C" } } ] },
            { "values": [ { "userEnteredValue": { "stringValue": "Top3" } } ] }
          ]
        }
      ],
      "rowGroups": [
        { "range": { "dimension": "ROWS", "startIndex": 2, "endIndex": 5 } }
      ]
    }
  ]
}

Even when I create the rowGroups JSON directly on the page, with its structured editor, to make sure it is properly defined, the created spreadsheet still doesn't group the specified rows. I have triple-checked all my objects from the top down and can't see what I am doing wrong.
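One possible workaround sketch, in case create keeps ignoring rowGroups: apply the grouping after creation with a separate spreadsheets.batchUpdate call using an addDimensionGroup request, which takes the same DimensionRange. (Hedged: this assumes grouping after the fact is acceptable; "sheetId": 0 is a placeholder for whatever ID the created Test1 sheet actually gets back from the create response.)

POST https://sheets.googleapis.com/v4/spreadsheets/{spreadsheetId}:batchUpdate
{
  "requests": [
    {
      "addDimensionGroup": {
        "range": {
          "sheetId": 0,
          "dimension": "ROWS",
          "startIndex": 2,
          "endIndex": 5
        }
      }
    }
  ]
}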
How to properly implement boolean logic
I'm using Elasticsearch for logging within an application. I need to write a log viewer that filters on all the fields of my document. My documents look like this:

"_source": {
  "timestamp": 1373502012000,
  "userId": 6,
  "paId": 56331,
  "lId": 6,
  "prId": 2,
  "vId": 6336,
  "actionType": "LOAD_DATA"
}

actionType is an enum (Java). I need to write an Elasticsearch equivalent to the following SQL query:

SELECT *
FROM snapshot.action_log_item
WHERE timestamp BETWEEN 1372718783286 AND 1372718783286
  AND userId=6
  AND paId=56331
  AND lId=6
  AND prId=2
  AND vId=6336
  AND (
    actionType='LOAD_DATA'
    OR actionType='SAVE_DATA'
    OR actionType='LOG_IN'
  );

Please help me write a properly nested query and/or filter to get a result equivalent to my SQL statement.

EDIT: Here's my current code (it works without the { "or"... portion):

{
  "query": {
    "bool": {
      "must": [
        { "term": { "userId": 6 } },
        { "term": { "lId": 6 } },
        { "term": { "vId": 6336 } }
      ]
    }
  },
  "filter": {
    "and": {
      "filters": [
        { "term": { "paId": 56331 } },
        { "range": {
            "timestamp": {
              "from": 1372718783286,
              "to": 1377643583286,
              "include_lower": true,
              "include_upper": true
            }
        } },
        { "or": {
            "filters": [
              { "term": { "actionType": "LOAD_DATA" } },
              { "term": { "actionType": "SAVE_DATA" } },
              { "term": { "actionType": "LOG_IN" } }
            ]
        } }
      ]
    }
  }
}

EDIT: The following query works. It's not the same query as above, but it returns the expected result. It seems that these filters/queries don't work on the actionType field.

{
  "size": 30,
  "query": {
    "filtered": {
      "query": {
        "bool": {
          "must": [
            { "term": { "uId": 6 } },
            { "term": { "loId": 6 } },
            { "term": { "prId": 2 } },
            { "terms": { "paId": [ 56331, 56298 ], "minimum_should_match": 1 } }
          ]
        }
      },
      "filter": {
        "range": {
          "timestamp": {
            "from": 1372718783286,
            "to": 1377643583286,
            "include_lower": true,
            "include_upper": true
          }
        }
      }
    }
  }
}
The {or... portion should look like this:

{
  "or": [
    { "term": { "actionType": "LOAD_DATA" } },
    { "term": { "actionType": "SAVE_DATA" } },
    { "term": { "actionType": "LOG_IN" } }
  ]
}

You can check the doc for that filter here.

Edit: As I see you are having problems, I rewrote your query. I hope it helps:

{
  "query": {
    "filtered": {
      "query": {
        "bool": {
          "must": [
            { "term": { "userId": 6 } },
            { "term": { "paId": 56331 } },
            { "term": { "lId": 6 } },
            { "term": { "prId": 2 } },
            { "term": { "vId": 6336 } },
            { "terms": {
                "actionType": [ "LOAD_DATA", "SAVE_DATA", "LOG_IN" ],
                "minimum_should_match": 1
            } }
          ]
        }
      },
      "filter": {
        "range": {
          "timestamp": {
            "from": 1372718783286,
            "to": 1377643583286,
            "include_lower": true,
            "include_upper": true
          }
        }
      }
    }
  }
}

Basically, I put the date range as a filter, and the other conditions are term queries inside the must clause of the bool query. You can see that the or part is now inside the must clause as a terms query that acts as an OR across those three values.
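A hedged aside on the actionType field, since the question's second edit notes that term/terms never seem to match it: if the field was indexed with the default (standard) analyzer rather than mapped as not_analyzed, the stored token for "LOAD_DATA" is the lowercased "load_data", and term/terms look up exact tokens, so the uppercase values match nothing. If that is the cause here (an assumption about your mapping, not something the question confirms), querying the lowercase tokens would work:

{ "terms": { "actionType": [ "load_data", "save_data", "log_in" ] } }

The cleaner long-term fix is mapping actionType as not_analyzed so the enum values are stored verbatim.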