I'm trying to create a driver for my USB device using iOS and DriverKit.
I'm basing my code on the example shown at WWDC: https://github.com/knightsc/USBApp
My driver starts fine when the device is connected and the ReadComplete method is called, but action->GetReference() returns only \0 characters.
To verify that the USB device is actually working, I first connected it to my Mac; using PyUSB I can see that it returns data in chunks of 1024 bytes on interface 0.
This is the data I get in PyUSB:
array('B', [6, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 32, 0, 33, 0, 34, 0, 35, 0, 36, 0, 37, 0, 38, 0, 39, 0, 40, 0, 41, 0, 42, 0, 43, 0, 44, 0, 45, 0, 46, 0, 47, 0, 48, 0, 49, 0, 50, 0, 51, 0, 52, 0, 53, 0, 54, 0, 55, 0, 56, 0, 57, 0, 58, 0, 59, 0, 60, 0, 61, 0, 62, 0, 63, 0, 64, 0, 65, 0, 66, 0, 67, 0, 68, 0, 69, 0, 70, 0, 71, 0, 72, 0, 73, 0, 74, 0, 75, 0, 76, 0, 77, 0, 78, 0, 79, 0, 80, 0, 81, 0, 82, 0, 83, 0, 84, 0, 85, 0, 86, 0, 87, 0, 88, 0, 89, 0, 90, 0, 91, 0, 92, 0, 93, 0, 94, 0, 95, 0, 96, 0, 97, 0, 98, 0, 99, 0, 100, 0, 101, 0, 102, 0, 103, 0, 104, 0, 105, 0, 106, 0, 107, 0, 108, 0, 109, 0, 110, 0, 111, 0, 112, 0, 113, 0, 114, 0, 115, 0, 116, 0, 117, 0, 118, 0, 119, 0, 120, 0, 121, 0, 122, 0, 123, 0, 124, 0, 125, 0, 126, 0, 127, 0, 128, 0, 129, 0, 130, 0, 131, 0, 132, 0, 133, 0, 134, 0, 135, 0, 136, 0, 137, 0, 138, 0, 139, 0, 140, 0, 141, 0, 142, 0, 143, 0, 144, 0, 145, 0, 146, 0, 147, 0, 148, 0, 149, 0, 150, 0, 151, 0, 152, 0, 153, 0, 154, 0, 155, 0, 156, 0, 157, 0, 158, 0, 159, 0, 160, 0, 161, 0, 162, 0, 163, 0, 164, 0, 165, 0, 166, 0, 167, 0, 168, 0, 169, 0, 170, 0, 171, 0, 172, 0, 173, 0, 174, 0, 175, 0, 176, 0, 177, 0, 178, 0, 179, 0, 180, 0, 181, 0, 182, 0, 183, 0, 184, 0, 185, 0, 186, 0, 187, 0, 188, 0, 189, 0, 190, 0, 191, 0, 192, 0, 193, 0, 194, 0, 195, 0, 196, 0, 197, 0, 198, 0, 199, 0, 200, 0, 201, 0, 202, 0, 203, 0, 204, 0, 205, 0, 206, 0, 207, 0, 208, 0, 209, 0, 210, 0, 211, 0, 212, 0, 213, 0, 214, 0, 215, 0, 216, 0, 217, 0, 218, 0, 219, 0, 220, 0, 221, 0, 222, 0, 223, 0, 224, 0, 225, 0, 226, 0, 227, 0, 228, 0, 229, 0, 230, 0, 231, 0, 232, 0, 233, 0, 234, 0, 235, 0, 236, 0, 237, 0, 238, 0, 239, 0, 240, 0, 241, 0, 242, 0, 243, 0, 244, 0, 245, 0, 246, 0, 247, 0, 248, 0, 249, 0, 250, 0, 251, 0, 252, 0, 253, 0, 254, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
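For reference, the PyUSB check looks roughly like this (the vendor/product IDs and the endpoint address below are placeholders, not my device's actual values):
import usb.core

dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)
dev.set_configuration()
# Read one 1024-byte chunk from the IN endpoint on interface 0
data = dev.read(0x81, 1024, timeout=1000)
print(data)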
This is the ivars struct:
struct Mk1dDriver_IVars
{
    IOUSBHostInterface       *interface;
    IOUSBHostPipe            *inPipe;
    OSAction                 *ioCompleteCallback;
    IOBufferMemoryDescriptor *inData;
    uint16_t                  maxPacketSize;
};
This is the Start method:
kern_return_t
IMPL(Mk1dDriver, Start)
{
    kern_return_t ret;
    IOUSBStandardEndpointDescriptors descriptors;

    ret = Start(provider, SUPERDISPATCH);
    __Require(kIOReturnSuccess == ret, Exit);

    ret = RegisterService();
    if (ret != kIOReturnSuccess)
    {
        Log("Start() - Failed to register service with error: 0x%08x.", ret);
        goto Exit;
    }

    ivars->interface = OSDynamicCast(IOUSBHostInterface, provider);
    __Require_Action(NULL != ivars->interface, Exit, ret = kIOReturnNoDevice);

    ret = ivars->interface->Open(this, 0, NULL);
    __Require(kIOReturnSuccess == ret, Exit);

    ret = ivars->interface->CopyPipe(kMyEndpointAddress, &ivars->inPipe);
    __Require(kIOReturnSuccess == ret, Exit);

    ret = ivars->interface->CreateIOBuffer(kIOMemoryDirectionIn,
                                           1024,
                                           &ivars->inData);
    __Require(kIOReturnSuccess == ret, Exit);

    ret = OSAction::Create(this,
                           Mk1dDriver_ReadComplete_ID,
                           IOUSBHostPipe_CompleteAsyncIO_ID,
                           0,
                           &ivars->ioCompleteCallback);
    __Require(kIOReturnSuccess == ret, Exit);

    ret = ivars->inPipe->AsyncIO(ivars->inData,
                                 ivars->maxPacketSize,
                                 ivars->ioCompleteCallback,
                                 0);
    __Require(kIOReturnSuccess == ret, Exit);

    os_log(OS_LOG_DEFAULT, "Finish");
    // The WWDC slides don't show the full function,
    // i.e. this is still unfinished.

Exit:
    return ret;
}
The only difference compared with Apple's code is that I set the capacity in CreateIOBuffer to 1024, because if I leave it at 0 it returns an error that memory could not be allocated: kIOReturnNoMemory.
And the ReadComplete method:
void
IMPL(Mk1dDriver, ReadComplete)
{
    char output[1024];
    memcpy(action->GetReference(), &output, 1024);
    os_log(OS_LOG_DEFAULT, "ReadComplete");
}
If I put a breakpoint on the log statement, I can see that every position in output is \0.
Any idea what I'm doing wrong?
Thanks
You need to store a reference to the IOBufferMemoryDescriptor* that you asked AsyncIO to write into when data is received from the device (ivars->inData), so that you can access it when the completion callback ReadComplete is called. You can store this in the memory that you can access with GetReference().
You should set the size of the custom memory that should be allocated for you. Currently you are allocating 0 bytes. See OSAction::Create.
In ReadComplete you can then call GetReference() to access the memory. Since you know that this memory contains a reference to the IOBufferMemoryDescriptor that data has been written to, you can then use it with memcpy.
Something like this:
ret = OSAction::Create(this,
                       Mk1dDriver_ReadComplete_ID,
                       IOUSBHostPipe_CompleteAsyncIO_ID,
                       sizeof(IOBufferMemoryDescriptor*),
                       &ivars->ioCompleteCallback);
// Copy the pointer value itself (hence &ivars->inData) into the
// action's reference memory.
memcpy(ivars->ioCompleteCallback->GetReference(),
       &ivars->inData, sizeof(IOBufferMemoryDescriptor*));
The first parameter to memcpy is the destination.
void
IMPL(Mk1dDriver, ReadComplete)
{
    IOBufferMemoryDescriptor* ptr;
    // Read the stored pointer value back out of the reference memory.
    memcpy(&ptr, action->GetReference(), sizeof(IOBufferMemoryDescriptor*));

    IOAddressSegment addressSegment{};
    ptr->GetAddressRange(&addressSegment);

    char output[1024];
    memcpy(output, reinterpret_cast<void*>(addressSegment.address), addressSegment.length);
}
I've got a numpy array with shape (3, 50):
data = np.array([[0, 3, 0, 2, 0, 0, 1, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 2, 1, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 1, 0, 0, 7, 0, 0, 0, 0,
1, 1, 2, 0, 0, 2],
[0, 0, 0, 0, 0, 3, 0, 1, 6, 1, 1, 0, 0, 0, 0, 2, 0, 0, 1, 0, 1, 0,
3, 0, 0, 0, 0, 0, 0, 5, 2, 2, 2, 1, 0, 0, 1, 0, 1, 3, 2, 0, 0, 0,
0, 0, 2, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0]])
and the following column names:
new_cols = [f'description_word_{i+1}_count' for i in range(50)]
I'm trying to add new columns to an already existing dataframe like this:
df[new_cols] = data
but I get the error:
KeyError: "None of [Index(['description_word_1_count',
'description_word_2_count',\n 'description_word_3_count',
'description_word_4_count',\n 'description_word_5_count',
'description_word_6_count',\n 'description_word_7_count',
'description_word_8_count',\n 'description_word_9_count',
'description_word_10_count',\n 'description_word_11_count',
'description_word_12_count',\n 'description_word_13_count',
'description_word_14_count',\n 'description_word_15_count',
'description_word_16_count',\n 'description_word_17_count',
'description_word_18_count',\n 'description_word_19_count',
'description_word_20_count',\n 'description_word_21_count',
'description_word_22_count',\n 'description_word_23_count',
'description_word_24_count',\n 'description_word_25_count',
'description_word_26_count',\n 'description_word_27_count',
'description_word_28_count',\n 'description_word_29_count',
'description_word_30_count',\n 'description_word_31_count',
'description_word_32_count',\n 'description_word_33_count',
'description_word_34_count',\n 'description_word_35_count',
'description_word_36_count',\n 'description_word_37_count',
'description_word_38_count',\n 'description_word_39_count',
'description_word_40_count',\n 'description_word_41_count',
'description_word_42_count',\n 'description_word_43_count',
'description_word_44_count',\n 'description_word_45_count',
'description_word_46_count',\n 'description_word_47_count',
'description_word_48_count',\n 'description_word_49_count',
'description_word_50_count'],\n dtype='object')] are in the
[columns]"
I also don't know where it finds the '\n' symbols in my column names.
At the same time, creating a new dataframe from the data works fine:
new_df = pd.DataFrame(data=data, columns=new_cols)
Does anyone know what is causing the error?
Suppose you have a df like this:
df = pd.DataFrame({'person': [1,1,1], 'event': ['A','B','C']})
You can add new columns like this:
import pandas as pd
import numpy as np
data = np.array([[0, 3, 0, 2, 0, 0, 1, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 2, 1, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 1, 0, 0, 7, 0, 0, 0, 0,
1, 1, 2, 0, 0, 2],
[0, 0, 0, 0, 0, 3, 0, 1, 6, 1, 1, 0, 0, 0, 0, 2, 0, 0, 1, 0, 1, 0,
3, 0, 0, 0, 0, 0, 0, 5, 2, 2, 2, 1, 0, 0, 1, 0, 1, 3, 2, 0, 0, 0,
0, 0, 2, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0]])
new_cols = [f'description_word_{i+1}_count' for i in range(50)]
df[new_cols] = pd.DataFrame(data, index=df.index)
I think the problem is that you are using syntax that creates a single series when you actually need to create several series; in other words, a dataframe.
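By the way, the '\n' characters are not in your column names: pandas is just pretty-printing the Index object inside the KeyError message. As a quick check of the fix, a minimal sketch with stand-in data:
import numpy as np
import pandas as pd

df = pd.DataFrame({'person': [1, 1, 1], 'event': ['A', 'B', 'C']})
data = np.zeros((3, 50), dtype=int)  # stand-in for the real word counts
new_cols = [f'description_word_{i+1}_count' for i in range(50)]

# Wrapping the array in a DataFrame aligned on df's index makes the
# multi-column assignment work.
df[new_cols] = pd.DataFrame(data, index=df.index)
print(df.shape)  # (3, 52)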
Problem: how do I write a SQLite statement that selects a value from a nested JSON object when the needed name is dynamic/variable? It is also important that this can be done in a single SQL statement; eventually it will be executed from within a bash script.
In the object sample below, I need to list all the dot11.advertisedssid.ssid values in the SQL database. An acceptable solution is to list all values of dot11.advertisedssid.ssid that exist in the JSON object, but I would like to understand how to query a dynamic JSON name (so I can get the other nested values too).
In general I am using json_extract in my SQL statements; I just can't figure out how to get to the ssid value in this example!
How do I know that 733545801 is the field name, and how can I then use it in the json_extract statement? And how do I do that for all such nested objects?
Examples:
In general, this is how I query other JSON values:
select json_extract(devices.device,'$."dot11.device"."dot11.device.typeset"') from devices;
An object sample from the database:
"dot11.device": {
"dot11.device.typeset": 257,
"dot11.device.client_map": {
},
"dot11.device.num_client_aps": 0,
"dot11.device.advertised_ssid_map": {
"733545801": {
"dot11.advertisedssid.ssid": "SampleFES-WiFi",
"dot11.advertisedssid.ssidlen": 15,
"dot11.advertisedssid.beacon": 1,
"dot11.advertisedssid.probe_response": 1,
"dot11.advertisedssid.channel": "6",
"dot11.advertisedssid.ht_mode": "HT20",
"dot11.advertisedssid.ht_center_1": 0,
"dot11.advertisedssid.ht_center_2": 0,
"dot11.advertisedssid.first_time": 1559567379,
"dot11.advertisedssid.last_time": 1559567379,
"dot11.advertisedssid.beacon_info": "",
"dot11.advertisedssid.cloaked": 0,
"dot11.advertisedssid.crypt_set": 268436162,
"dot11.advertisedssid.maxrate": 65.000000,
"dot11.advertisedssid.beaconrate": 10,
"dot11.advertisedssid.beacons_sec": 2,
"dot11.advertisedssid.ietag_checksum": 1220416683,
"dot11.advertisedssid.wpa_mfp_required": 0,
"dot11.advertisedssid.wpa_mfp_supported": 0,
"dot11.advertisedssid.dot11d_country": "",
"dot11.advertisedssid.dot11d_list": [
],
"dot11.advertisedssid.wps_state": 0,
"dot11.advertisedssid.dot11r_mobility": 0,
"dot11.advertisedssid.dot11r_mobility_domain_id": 0,
"dot11.advertisedssid.dot11e_qbss": 0,
"dot11.advertisedssid.dot11e_qbss_stations": 0,
"dot11.advertisedssid.dot11e_channel_utilization_perc": 0.000000,
"dot11.advertisedssid.ccx_txpower": 0,
"dot11.advertisedssid.cisco_client_mfp": 0,
"dot11.advertisedssid.ie_tag_list": [
0.000000,
1.000000,
3.000000,
5.000000,
42.000000,
50.000000,
48.000000,
45.000000,
61.000000,
127.000000,
221.000000
]
}
}
Thanks for the help!
PS: This is from the new Kismet database with the redesigned schema.
Here is the whole object:
{
"kismet.device.base.manuf": "Texas Instruments",
"kismet.device.base.key": "4202770D00000000_AFB4F569D2380000",
"kismet.device.base.macaddr": "38:D2:69:F5:B4:AF",
"kismet.device.base.phyname": "IEEE802.11",
"kismet.device.base.phyid": 0,
"kismet.device.base.name": "LincolnFES-WiFi",
"kismet.device.base.commonname": "LincolnFES-WiFi",
"kismet.device.base.type": "Wi-Fi AP",
"kismet.device.base.basic_type_set": 1,
"kismet.device.base.crypt": "WPA2-PSK",
"kismet.device.base.basic_crypt_set": 2,
"kismet.device.base.first_time": 1559567379,
"kismet.device.base.last_time": 1559567379,
"kismet.device.base.mod_time": 1559567380,
"kismet.device.base.packets.total": 3,
"kismet.device.base.packets.rx": 0,
"kismet.device.base.packets.tx": 0,
"kismet.device.base.packets.llc": 3,
"kismet.device.base.packets.error": 0,
"kismet.device.base.packets.data": 0,
"kismet.device.base.packets.crypt": 0,
"kismet.device.base.packets.filtered": 0,
"kismet.device.base.datasize": 0,
"kismet.device.base.packets.rrd": {
"kismet.common.rrd.last_time": 1559567383,
"kismet.common.rrd.minute_vec": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"kismet.common.rrd.blank_val": 0,
"kismet.common.rrd.aggregator": "default",
"kismet.common.rrd.hour_vec": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"kismet.common.rrd.day_vec": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
]
},
"kismet.device.base.signal": {
"kismet.common.signal.type": "dbm",
"kismet.common.signal.last_signal": -56,
"kismet.common.signal.last_noise": 0,
"kismet.common.signal.min_signal": -74,
"kismet.common.signal.min_noise": 0,
"kismet.common.signal.max_signal": -56,
"kismet.common.signal.max_noise": 0,
"kismet.common.signal.maxseenrate": 10,
"kismet.common.signal.encodingset": 1,
"kismet.common.signal.carrierset": 1,
"kismet.common.signal.signal_rrd": {
"kismet.common.rrd.last_time": 1559567383,
"kismet.common.rrd.minute_vec": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"kismet.common.rrd.blank_val": 0,
"kismet.common.rrd.aggregator": "peak_signal"
}
},
"kismet.device.base.freq_khz_map": {
"2437000.000000": 1,
"2442000.000000": 1,
"5500000.000000": 1
},
"kismet.device.base.channel": "6",
"kismet.device.base.frequency": 2442000,
"kismet.device.base.num_alerts": 0,
"kismet.device.base.tags": {
},
"kismet.device.base.seenby": {
"-1970862229": {
"kismet.common.seenby.uuid": "5FE308BD-0000-0000-0000-00C0CAA60413",
"kismet.common.seenby.first_time": 1559567379,
"kismet.common.seenby.last_time": 1559567379,
"kismet.common.seenby.num_packets": 3,
"kismet.common.seenby.freq_khz_map": {
"2437000.000000": 1,
"2442000.000000": 1,
"5500000.000000": 1
},
"kismet.common.seenby.signal": {
"kismet.common.signal.type": "dbm",
"kismet.common.signal.last_signal": -56,
"kismet.common.signal.last_noise": 0,
"kismet.common.signal.min_signal": -74,
"kismet.common.signal.min_noise": 0,
"kismet.common.signal.max_signal": -56,
"kismet.common.signal.max_noise": 0,
"kismet.common.signal.maxseenrate": 10,
"kismet.common.signal.encodingset": 1,
"kismet.common.signal.carrierset": 1,
"kismet.common.signal.signal_rrd": {
"kismet.common.rrd.last_time": 1559567383,
"kismet.common.rrd.minute_vec": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"kismet.common.rrd.blank_val": 0,
"kismet.common.rrd.aggregator": "peak_signal"
}
}
}
},
"kismet.device.base.server_uuid": "A8F71A2C-85F8-11E9-BA41-4B49534D4554",
"dot11.device": {
"dot11.device.typeset": 257,
"dot11.device.client_map": {
},
"dot11.device.num_client_aps": 0,
"dot11.device.advertised_ssid_map": {
"733545801": {
"dot11.advertisedssid.ssid": "LincolnFES-WiFi",
"dot11.advertisedssid.ssidlen": 15,
"dot11.advertisedssid.beacon": 1,
"dot11.advertisedssid.probe_response": 1,
"dot11.advertisedssid.channel": "6",
"dot11.advertisedssid.ht_mode": "HT20",
"dot11.advertisedssid.ht_center_1": 0,
"dot11.advertisedssid.ht_center_2": 0,
"dot11.advertisedssid.first_time": 1559567379,
"dot11.advertisedssid.last_time": 1559567379,
"dot11.advertisedssid.beacon_info": "",
"dot11.advertisedssid.cloaked": 0,
"dot11.advertisedssid.crypt_set": 268436162,
"dot11.advertisedssid.maxrate": 65,
"dot11.advertisedssid.beaconrate": 10,
"dot11.advertisedssid.beacons_sec": 2,
"dot11.advertisedssid.ietag_checksum": 1220416683,
"dot11.advertisedssid.wpa_mfp_required": 0,
"dot11.advertisedssid.wpa_mfp_supported": 0,
"dot11.advertisedssid.dot11d_country": "",
"dot11.advertisedssid.dot11d_list": [
],
"dot11.advertisedssid.wps_state": 0,
"dot11.advertisedssid.dot11r_mobility": 0,
"dot11.advertisedssid.dot11r_mobility_domain_id": 0,
"dot11.advertisedssid.dot11e_qbss": 0,
"dot11.advertisedssid.dot11e_qbss_stations": 0,
"dot11.advertisedssid.dot11e_channel_utilization_perc": 0,
"dot11.advertisedssid.ccx_txpower": 0,
"dot11.advertisedssid.cisco_client_mfp": 0,
"dot11.advertisedssid.ie_tag_list": [
0,
1,
3,
5,
42,
50,
48,
45,
61,
127,
221
]
}
},
"dot11.device.num_advertised_ssids": 1,
"dot11.device.probed_ssid_map": {
},
"dot11.device.num_probed_ssids": 0,
"dot11.device.associated_client_map": {
},
"dot11.device.num_associated_clients": 0,
"dot11.device.client_disconnects": 0,
"dot11.device.last_sequence": 0,
"dot11.device.bss_timestamp": 0,
"dot11.device.num_fragments": 0,
"dot11.device.num_retries": 0,
"dot11.device.datasize": 0,
"dot11.device.datasize_retry": 0,
"dot11.device.last_probed_ssid_csum": 0,
"dot11.device.last_beaconed_ssid": "LincolnFES-WiFi",
"dot11.device.last_beaconed_ssid_checksum": 733545801,
"dot11.device.last_bssid": "38:D2:69:F5:B4:AF",
"dot11.device.last_beacon_timestamp": 1559567379,
"dot11.device.wps_m3_count": 0,
"dot11.device.wps_m3_last": 0,
"dot11.device.wpa_handshake_list": [
],
"dot11.device.wpa_nonce_list": [
],
"dot11.device.wpa_anonce_list": [
],
"dot11.device.wpa_present_handshake": 0,
"dot11.device.min_tx_power": 0,
"dot11.device.max_tx_power": 0,
"dot11.device.supported_channels": [
],
"dot11.device.link_measurement_capable": 0,
"dot11.device.neighbor_report_capable": 0,
"dot11.device.extended_capabilities": [
],
"dot11.device.beacon_fingerprint": 4212996422,
"dot11.device.probe_fingerprint": 0,
"dot11.device.response_fingerprint": 0
}
}
When you want to recursively walk through the fields of an entire object and its contents, you need json_tree():
SELECT j.value
FROM devices AS d
JOIN json_tree(d.device) AS j
WHERE j.key = 'dot11.advertisedssid.ssid';
gives
value
--------------
SampleFES-WiFi
when run on a table holding a fixed version of that sample object.
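For reference, a minimal sketch of running that same query from Python's sqlite3 module (the kismet.db filename is an assumption; a bash script could equally pass the statement to the sqlite3 CLI):
import sqlite3

conn = sqlite3.connect("kismet.db")
# json_tree() walks the whole JSON blob recursively, one row per node,
# so the dynamic "733545801" key never needs to be known in advance.
for (ssid,) in conn.execute(
        "SELECT j.value "
        "FROM devices AS d "
        "JOIN json_tree(d.device) AS j "
        "WHERE j.key = 'dot11.advertisedssid.ssid'"):
    print(ssid)
conn.close()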
I know this is a bit old, but the OP seemed (in comments) to want a more complete solution; I know I did when I first came across this answer. The accepted solution lets you pull one field out of the JSON blob, but the common use case in the OP's example is to pull multiple fields from that blob. After some searching I found that the json_extract() function works very well for this once you realize that the "dot11.device.advertised_ssid_map" object is an array; once you provide it with an index, the normal query method works.
Considerations:
The OP's example relates to the Kismet device field in the devices table, so my example uses a common query that I often need in the context of that table
With Kismet the keys used in these JSON blobs are long and contain dots, so the syntax for specifying them in SQLite3 is a bit cumbersome for some nested values
SQLite3's JSON1 extension does not seem to like some of the wildcarding syntax normally allowed in JSONPath specifications, so long explicit paths are required
So here is my solution:
SELECT devmac, strongest_signal,
json_extract(d.device, '$."dot11.device"."dot11.device.advertised_ssid_map"[0]."dot11.advertisedssid.ssid"') AS ssid,
json_extract(d.device, '$."dot11.device"."dot11.device.advertised_ssid_map"[0]."dot11.advertisedssid.cloaked"') AS cloaked,
json_extract(d.device, '$."kismet.device.base.signal"."kismet.common.signal.min_signal"') AS weakest_signal,
json_extract(d.device, '$."kismet.device.base.channel"') AS channel,
json_extract(d.device, '$."dot11.device"."dot11.device.num_associated_clients"') AS clientCnt,
json_extract(d.device, '$."kismet.device.base.crypt"') AS crypt,
json_extract(d.device, '$."kismet.device.base.manuf"') AS manuf
FROM devices AS d
WHERE type = 'Wi-Fi AP'
;
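If, as in the sample above, the advertised_ssid_map is a JSON object keyed by the dynamic checksum rather than an array, json_each() can iterate its entries without knowing the key. A sketch, again run from Python's sqlite3 module (the kismet.db filename is an assumption):
import sqlite3

conn = sqlite3.connect("kismet.db")
# json_each() expands the map one row per entry, so j.value is each nested
# SSID object regardless of its numeric key (e.g. 733545801).
rows = conn.execute("""
    SELECT json_extract(j.value, '$."dot11.advertisedssid.ssid"') AS ssid,
           json_extract(j.value, '$."dot11.advertisedssid.channel"') AS channel
    FROM devices AS d,
         json_each(d.device, '$."dot11.device"."dot11.device.advertised_ssid_map"') AS j
""").fetchall()
for ssid, channel in rows:
    print(ssid, channel)
conn.close()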
I am working on a word embedding model for answer matching score prediction using TFLearn. I have built a model on sentence vectors with the tflearn DNN classifier; now I have to add a word embedding layer to the DNN model. How do I do that? Thanks in advance.
"JVMdefines": enables a computer to run a Java program
is converted as:
"JVMdefines": [[list([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
enables a computer to run a Java program:
list([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])]
My question: is there any method by which the machine can analyze
enables a "machine" to run a Java program
i.e., can it detect that computer and machine have the same meaning?
I would post a clarifying comment, but I do not have enough reputation to do so, so I will try to answer given the information you have presented in the original question...
Your problem seems unclear, but here is how you would do this for a binary classification problem in tflearn.
Step 1: Preprocessing
The first thing you need to do is tokenize and transform your sentences into lists of integers:
"What kind of food do you like?" ---> [234,64,12,5224,43,96,23]
Then, most people pad their sequences to all be the same length, cutting off the long ones or increasing the length of short ones by padding with 0's.
[234,64,12,5224,43,96,23] ---> [0,0,0,0....234,64,12,5224,43,96,23]
Hint:
from tflearn.data_utils import pad_sequences
padded = pad_sequences(unpadded, maxlen=max_document_length, value=0.)
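For the tokenization step itself, one option that ships with tflearn is VocabularyProcessor. A minimal sketch (note it pads/truncates to max_document_length on its own, so the pad_sequences call becomes optional):
from tflearn.data_utils import VocabularyProcessor

max_document_length = 100
vp = VocabularyProcessor(max_document_length)
# fit_transform maps each word to an integer id and yields padded sequences
padded = list(vp.fit_transform(["What kind of food do you like?"]))
vocabulary_size = len(vp.vocabulary_)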
Step 2: Model Building
After you transform all the text you have into integer sequences, you can build the model. Note here that our input shape is [None, max_document_length]. None means optional size (allows for variable batch size), and max_document_length is the length of our sequences that we padded previously.
#Create our model (imports for the layers used below)
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data

network = input_data(shape=[None, max_document_length], name='input')
Create the embedding matrix. Note that you push the embedding matrix to the CPU. The input_dim parameter expects an integer representing the size of your vocabulary; output_dim is the size of your embedding.
with tf.device('/cpu:0'):
    network = tflearn.embedding(network, input_dim=vocabulary_size, output_dim=128)

#Pass embeddings into an lstm layer (handles sequential problems)
network = tflearn.lstm(network, 512, dropout=0.8)

#Squish data into a fully connected layer, with 2 outputs for binary classification
network = tflearn.fully_connected(network, 2, activation='softmax')

#Perform regression to get the final answer
network = tflearn.regression(network, optimizer='rmsprop', learning_rate=0.001,
                             loss='categorical_crossentropy')

#Wrap the graph we just created in a tflearn DNN wrapper
model = tflearn.DNN(network)

#Run model.fit to actually train your model
model.fit(x_train, y_train, n_epoch=15, shuffle=True,
          validation_set=(x_val, y_val), show_metric=True, batch_size=batch_size)
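After training, scoring new sentences is just a predict call; a sketch, where padded_test is assumed to be tokenized and padded exactly like the training data:
#Each row of the result holds the two class probabilities from the softmax
pred = model.predict(padded_test)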
import numpy as np
from hmmlearn.hmm import MultinomialHMM
startprob_prior = np.array([0.5, 0.5]) # guess
transmat_prior = np.array([[0.9, 0.1], [0.3, 0.7]]) # guess
# data is binary, 0/1 with bursts of 1's
x = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # data
x = np.array(x).reshape(-1,1) # make it in the desirable format
hmm = MultinomialHMM(n_components=2, verbose=True, startprob_prior=startprob_prior, transmat_prior=transmat_prior)
hmm.fit(x)
print(hmm.monitor_.converged) # returns True
print(hmm.transmat_) # returns 2x2 matrix of NaN
Why doesn't it converge? The 1's clearly come in bursts.
See hmmlearn issue 137.
The solution was to tell the model not to initialize the emission probabilities (model.init_params = 'st') and to set them manually via the attribute emissionprob_.
Now it seems to be working! Red is the state, blue is the observation:
import numpy as np
from hmmlearn.hmm import MultinomialHMM
import hmmlearn
start_probability = np.array([0.9, 0.1]) # guess
transition_probability = np.array([[0.9, 0.1], [0.1, 0.9]])
emission_probability = np.array([[0.9, 0.1], [0.1, 0.9]])
model = MultinomialHMM(n_components=2, verbose=True, n_iter=1000, tol=1e-3)
model.startprob = start_probability
model.transmat = transition_probability
model.emissionprob_ = emission_probability  # notice the init is to the internal variable emissionprob_ (trailing underscore), not emissionprob
model.init_params = 'st'  # let fit() initialize only startprob and transmat, keeping emissionprob_ as set above
# data is binary, 0/1 with bursts of 1's
x = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0] # data
x = np.array(x).reshape(-1, 1) # make it in the desirable format
model.fit(x)
print(model.monitor_.converged) # returns True
print(model.transmat_)  # learned 2x2 transition matrix (no NaNs this time)
print(model.emissionprob_)  # learned 2x2 emission matrix
print(model.startprob_)  # learned initial state distribution
logprob, estimated_states = model.decode(x, algorithm="viterbi")
import matplotlib.pyplot as plt
plt.stem(x, label='observation')
plt.plot(estimated_states, label='hidden states', color='red')
plt.legend()
plt.show()
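As a small follow-up, the posterior state probabilities can be inspected as well; a sketch reusing the model and x fitted above:
# Probability of being in each hidden state at every time step
posteriors = model.predict_proba(x)
print(posteriors.shape)  # (len(x), 2)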