I've written a simple USB device driver, but it never gets probed when the device is connected to the system; the kernel binds the usb-storage module to the device instead.
Code for my kernel module:
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/device.h>

#define USB_IT8951_VENDOR_ID  0x048d /* ITE vendor ID */
#define USB_IT8951_PRODUCT_ID 0x0220

static int it8951_usb_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
    pr_info("test_string 2\n");
    return 0;
}

static void it8951_usb_disconnect(struct usb_interface *interface)
{
    pr_info("Disconnect enter\n");
}

static const struct usb_device_id it8951_usb_devices[] = {
    { USB_DEVICE(USB_IT8951_VENDOR_ID, USB_IT8951_PRODUCT_ID) },
    {},
};
MODULE_DEVICE_TABLE(usb, it8951_usb_devices);

static struct usb_driver it8951_usb_driver_struct = {
    .name = "it8951_usb",
    .probe = it8951_usb_probe,
    .disconnect = it8951_usb_disconnect,
    //.fops = &skel_fops,
    .id_table = it8951_usb_devices,
};

static int __init it8951_usb_init(void)
{
    int result;

    pr_info("test_string 1\n");
    result = usb_register(&it8951_usb_driver_struct);
    if (result < 0) {
        pr_err("usb_register failed for the " __FILE__ " driver. Error number %d\n", result);
        return result;
    }
    return 0;
}
module_init(it8951_usb_init);

static void __exit it8951_usb_exit(void)
{
    usb_deregister(&it8951_usb_driver_struct);
}
module_exit(it8951_usb_exit);

MODULE_LICENSE("GPL"); /* required: usb_register() is exported GPL-only */
dmesg output when the device is connected:
[ 668.940000] usb 1-2: new high-speed USB device number 3 using atmel-ehci
[ 669.130000] usb 1-2: New USB device found, idVendor=048d, idProduct=0220
[ 669.130000] usb 1-2: New USB device strings: Mfr=1, Product=2, SerialNumber=3
[ 669.140000] usb 1-2: Product: Digi-Photo-Frame
[ 669.140000] usb 1-2: Manufacturer: Smedia inc.
[ 669.140000] usb 1-2: SerialNumber: 14024689FA08
[ 669.160000] usb-storage 1-2:1.0: USB Mass Storage device detected
[ 669.170000] scsi host0: usb-storage 1-2:1.0
[ 670.240000] scsi 0:0:0:0: Direct-Access Generic Storage RamDisc 1.00 PQ: 0 ANSI: 0 CCS
[ 670.260000] sd 0:0:0:0: [sda] 1 512-byte logical blocks: (512 B/512 B)
[ 670.270000] sd 0:0:0:0: [sda] Write Protect is off
[ 670.270000] sd 0:0:0:0: [sda] Mode Sense: 03 00 00 00
[ 670.270000] sd 0:0:0:0: [sda] No Caching mode page found
[ 670.270000] sd 0:0:0:0: [sda] Assuming drive cache: write through
[ 670.320000] sd 0:0:0:0: [sda] Attached SCSI removable disk
I can't remove usb-storage support from the system, so is there a way to force the kernel to use my module instead of usb-storage? Or can I write a udev rule to re-assign the device?
Thanks in advance.
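One way to hand the interface over without removing usb-storage from the kernel is to rebind it through sysfs after the it8951_usb module above is loaded. A rough user-space sketch (assuming the driver name it8951_usb registered above and the interface 1-2:1.0 from the dmesg output; both writes need root):

#include <stdio.h>

static int write_sysfs(const char *path, const char *value)
{
    FILE *f = fopen(path, "w");
    if (!f) {
        perror(path);
        return -1;
    }
    fputs(value, f);
    return fclose(f);
}

int main(void)
{
    const char *iface = "1-2:1.0"; /* bus-port:config.interface, taken from dmesg */

    /* Detach the interface from usb-storage ... */
    write_sysfs("/sys/bus/usb/drivers/usb-storage/unbind", iface);
    /* ... and attach it to the custom driver (name comes from struct usb_driver.name). */
    return write_sysfs("/sys/bus/usb/drivers/it8951_usb/bind", iface);
}

The same two writes could also be triggered from a udev rule keyed on idVendor/idProduct (048d/0220) so the reassignment happens automatically when the device is plugged in.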
How do you tell OpenCL to build for a GPU instead of a CPU? Will it automatically pick one over the other?
OpenCL will not automatically pick a device for you. You have to explicitly choose a platform (Intel/AMD/Nvidia) and a device (CPU/GPU) on that platform. Platform #0 and device #0 by default will not always give you the GPU. This is quite cumbersome when running code on different computers, as on each you have to manually select the device.
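For illustration, explicit selection with the plain OpenCL C API (a minimal sketch with error handling omitted, not the wrapper discussed below) amounts to enumerating the platforms and then asking a platform for a device of the type you want:

#include <CL/cl.h>
#include <stdio.h>

int main(void)
{
    cl_platform_id platforms[8];
    cl_uint num_platforms = 0;
    clGetPlatformIDs(8, platforms, &num_platforms); /* one platform per installed driver */

    cl_device_id device = NULL;
    for (cl_uint i = 0; i < num_platforms; i++) {
        cl_uint num_devices = 0;
        /* explicitly request a GPU on this platform; use CL_DEVICE_TYPE_CPU for a CPU */
        if (clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_GPU, 1, &device, &num_devices) == CL_SUCCESS && num_devices > 0) break;
        device = NULL;
    }

    if (device != NULL) {
        char name[256] = { 0 };
        clGetDeviceInfo(device, CL_DEVICE_NAME, sizeof(name), name, NULL);
        printf("Selected device: %s\n", name);
    } else {
        printf("No GPU found on any platform.\n");
    }
    return 0;
}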
However, there is a convenient solution for this: a lightweight OpenCL-Wrapper that automatically picks the fastest available GPU (or the CPU if no GPU is available) for you. It works by reading out the number of compute units and the clock frequency, and filling in the missing information (number of cores per CU) from the vendor and device name with a small built-in database.
Find the source code with an example here.
Here is just the code for automatically selecting the fastest device:
vector<cl::Device> cl_devices; // get all devices of all platforms
{
    vector<cl::Platform> cl_platforms; // get all platforms (drivers)
    cl::Platform::get(&cl_platforms);
    for(uint i=0u; i<(uint)cl_platforms.size(); i++) {
        vector<cl::Device> cl_devices_available;
        cl_platforms[i].getDevices(CL_DEVICE_TYPE_ALL, &cl_devices_available); // to query only GPUs, use CL_DEVICE_TYPE_GPU here
        for(uint j=0u; j<(uint)cl_devices_available.size(); j++) {
            cl_devices.push_back(cl_devices_available[j]);
        }
    }
}
cl::Device cl_device; // select fastest available device
{
    float best_value = 0.0f;
    uint best_i = 0u; // index of fastest device
    for(uint i=0u; i<(uint)cl_devices.size(); i++) { // find device with highest (estimated) floating point performance
        const string name = trim(cl_devices[i].getInfo<CL_DEVICE_NAME>()); // device name
        const string vendor = trim(cl_devices[i].getInfo<CL_DEVICE_VENDOR>()); // device vendor
        const uint compute_units = (uint)cl_devices[i].getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>(); // compute units (CUs) can contain multiple cores depending on the microarchitecture
        const uint clock_frequency = (uint)cl_devices[i].getInfo<CL_DEVICE_MAX_CLOCK_FREQUENCY>(); // in MHz
        const bool is_gpu = cl_devices[i].getInfo<CL_DEVICE_TYPE>()==CL_DEVICE_TYPE_GPU;
        const uint ipc = is_gpu?2u:32u; // IPC (instructions per cycle) is 2 for GPUs and 32 for most modern CPUs
        const bool nvidia_192_cores_per_cu = contains_any(to_lower(name), {" 6", " 7", "ro k", "la k"}) || (clock_frequency<1000u&&contains(to_lower(name), "titan")); // identify Kepler GPUs
        const bool nvidia_64_cores_per_cu = contains_any(to_lower(name), {"p100", "v100", "a100", "a30", " 16", " 20", "titan v", "titan rtx", "ro t", "la t", "ro rtx"}) && !contains(to_lower(name), "rtx a"); // identify P100, Volta, Turing, A100, A30
        const bool amd_128_cores_per_dualcu = contains(to_lower(name), "gfx10"); // identify RDNA/RDNA2 GPUs where dual CUs are reported
        const float nvidia = (float)(contains(to_lower(vendor), "nvidia"))*(nvidia_192_cores_per_cu?192.0f:(nvidia_64_cores_per_cu?64.0f:128.0f)); // Nvidia GPUs have 192 cores/CU (Kepler), 128 cores/CU (Maxwell, Pascal, Ampere) or 64 cores/CU (P100, Volta, Turing, A100)
        const float amd = (float)(contains_any(to_lower(vendor), {"amd", "advanced"}))*(is_gpu?(amd_128_cores_per_dualcu?128.0f:64.0f):0.5f); // AMD GPUs have 64 cores/CU (GCN, CDNA) or 128 cores/dualCU (RDNA, RDNA2), AMD CPUs (with SMT) have 1/2 core/CU
        const float intel = (float)(contains(to_lower(vendor), "intel"))*(is_gpu?8.0f:0.5f); // Intel integrated GPUs usually have 8 cores/CU, Intel CPUs (with HT) have 1/2 core/CU
        const float arm = (float)(contains(to_lower(vendor), "arm"))*(is_gpu?8.0f:1.0f); // ARM GPUs usually have 8 cores/CU, ARM CPUs have 1 core/CU
        const uint cores = to_uint((float)compute_units*(nvidia+amd+intel+arm)); // for CPUs, compute_units is the number of threads (twice the number of cores with hyperthreading)
        const float tflops = 1E-6f*(float)cores*(float)ipc*(float)clock_frequency; // estimated device FP32 floating point performance in TeraFLOPs/s
        if(tflops>best_value) {
            best_value = tflops;
            best_i = i;
        }
    }
    const string name = trim(cl_devices[best_i].getInfo<CL_DEVICE_NAME>()); // device name
    cl_device = cl_devices[best_i];
    print_info(name); // print device name
}
Alternatively, you can make it automatically choose the device with the most memory rather than the most FLOPs, or a device with a specified ID from the list of all devices of all platforms. There are many more benefits to using this wrapper, for example significantly simpler code for handling arrays and automatic tracking of total device memory allocation, all without impacting performance in any way.
I am working on a react-native Android app that uses react-native-ble-plx for BLE support, and a Windows 10 GATT server/peripheral that uses the .NET API Windows.Devices.Bluetooth.GenericAttributeProfile.
When I add any "ServiceData" to the advertising payload in the Windows GATT service, the scanned device's 'serviceData' payload is always null in the react-native client.
In the Windows 10 server, if the GattServiceProviderAdvertisingParameters.ServiceData property is not null, then the service advertisement status is GattServiceProviderAdvertisementStatus.StartedWithoutAllAdvertisementData. (When leaving 'ServiceData' set to null, it is GattServiceProviderAdvertisementStatus.Started, as expected).
The Silicon Labs "EFR Connect" app on Android also does not show any ServiceData in the device's advertising packet.
Inspecting the packets on the Windows machine with Wireshark and BTVS, however, does show the Service Data bytes, and the controller returns a 'Success' response.
Here is the code where I set up the ServiceData in Windows:
...
GattServiceProviderAdvertisingParameters advParameters = new GattServiceProviderAdvertisingParameters
{
    IsConnectable = _peripheralSupported,
    IsDiscoverable = true,
    ServiceData = GetServiceData().AsBuffer()
};

_serviceProvider.AdvertisementStatusChanged += ServiceProvider_AdvertisementStatusChanged;
_serviceProvider.StartAdvertising(advParameters);
...

private byte[] GetServiceData()
{
    var flagsData = new List<byte> { 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
    flagsData.AddRange(new List<byte>(Encoding.ASCII.GetBytes("ABCDEFGHIJKLMNOP")));
    return flagsData.ToArray();
}
Here is the code in the react-native scanner:
public scanDevices(
  timeoutSeconds: number,
  listener: (error: string | null, scannedDevice: IPhoenixDevice | null) => void,
): void {
  console.log('Entered PhoenixDeviceManager.scanDevices');
  this.isScanning = true;
  try {
    this.scannedDevices.length = 0; // Clear array of devices
    this.bleManager.startDeviceScan(this.serviceUUIDs, null, (error, scannedDevice) => {
      console.log('In device scan callback');
      if (error) {
        console.warn(error);
        listener(`Error message: ${error.message}, reason: ${error.reason}`, null);
      }
      if (!scannedDevice) {
        console.log('scannedDevice is null');
      } else if (!this.scannedDevices.some((d) => d.id === scannedDevice.id)) {
        this.scannedDevices.push(scannedDevice);
        listener(null, new PhoenixDevice(scannedDevice.id, scannedDevice.name));
        console.log(
          `Device discovered, id: ${scannedDevice.id}, name: ${scannedDevice.name}, localName: ${scannedDevice.localName}, serviceData: ${scannedDevice.serviceData}`,
        );
      }
    });
    const timeoutMs = timeoutSeconds * 1000;
    // stop scanning devices after specified number of seconds
    setTimeout(() => {
      this.stopScanning();
    }, timeoutMs);
  } catch (error) {
    console.error(error);
    this.stopScanning();
  }
}
Here is the Wireshark trace taken on Windows 10 showing the packet containing the ServiceData (and the Success response):
Frame 499: 84 bytes on wire (672 bits), 84 bytes captured (672 bits) on interface TCP#127.0.0.1:24352, id 0
Bluetooth
Bluetooth HCI H4
Bluetooth HCI Command - LE Set Extended Advertising Data
Command Opcode: LE Set Extended Advertising Data (0x2037)
Parameter Total Length: 80
Advertising Handle: 0x02
Data Operation: Complete scan response data (0x03)
Fragment Preference: The Controller should not fragment or should minimize fragmentation of Host data (0x01)
Data Length: 76
Advertising Data
Flags
Length: 2
Type: Flags (0x01)
000. .... = Reserved: 0x0
...1 .... = Simultaneous LE and BR/EDR to Same Device Capable (Host): true (0x1)
.... 1... = Simultaneous LE and BR/EDR to Same Device Capable (Controller): true (0x1)
.... .0.. = BR/EDR Not Supported: false (0x0)
.... ..1. = LE General Discoverable Mode: true (0x1)
.... ...0 = LE Limited Discoverable Mode: false (0x0)
16-bit Service Class UUIDs
Length: 3
Type: 16-bit Service Class UUIDs (0x03)
UUID 16: Device Information (0x180a)
128-bit Service Class UUIDs
Length: 17
Type: 128-bit Service Class UUIDs (0x07)
Custom UUID: caecface-e1d9-11e6-bf01-fe55135034f0 (Unknown)
Service Data - 128 bit UUID
Length: 50
Type: Service Data - 128 bit UUID (0x21)
Custom UUID: caecface-e1d9-11e6-bf01-fe55135034f0 (Unknown)
Service Data: ffff0000000000000000000000000000004142434445464748494a4b4c4d4e4f50
[Response in frame: 500]
[Command-Response Delta: 1.889ms]
Frame 500: 7 bytes on wire (56 bits), 7 bytes captured (56 bits) on interface TCP#127.0.0.1:24352, id 0
Bluetooth
Bluetooth HCI H4
Bluetooth HCI Event - Command Complete
Event Code: Command Complete (0x0e)
Parameter Total Length: 4
Number of Allowed Command Packets: 1
Command Opcode: LE Set Extended Advertising Data (0x2037)
0010 00.. .... .... = Opcode Group Field: LE Controller Commands (0x08)
.... ..00 0011 0111 = Opcode Command Field: LE Set Extended Advertising Data (0x037)
Status: Success (0x00)
[Command in frame: 499]
[Command-Response Delta: 1.889ms]
I've also tried the code from this example verbatim, and it has exactly the same problem:
https://github.com/ProH4Ck/treadmill-bridge/blob/98e683e2380178319972af522d9251f44350a448/src/TreadmillBridge/Services/VirtualTreadmill/VirtualTreadmillService.cs#L90
Does anyone know what the problem might be?
I am trying to use the QSPI bus to access a serial NOR flash in Zephyr. The CS (chip select) pin stays high all the time, but it should go low while the flash chip is selected. I'm wondering whether QSPI CS works in Zephyr, or whether I need to configure the CS pin as a GPIO and control it from my own software.
Has anyone had the CS pin of QSPI working in Zephyr? I am using the nRF52840 from Nordic Semiconductor.
Thanks,
JC
I think you need to configure it yourself:
...
#include <drivers/gpio.h>
#include <drivers/spi.h>

struct spi_cs_control spi_cs = {
    /* PA4 as CS pin */
    .gpio_dev = DEVICE_DT_GET(DT_NODELABEL(gpioa)),
    .gpio_pin = 4,
    .gpio_dt_flags = GPIO_ACTIVE_LOW,
    /* delay in microseconds to wait before starting the transmission and before releasing the CS line */
    .delay = 10,
};
#define SPI_CS (&spi_cs)

struct spi_config spi_cfg = {
    .frequency = 350000,
    .operation = SPI_OP_MODE_MASTER | SPI_TRANSFER_MSB | SPI_WORD_SET(8) | SPI_LINES_QUAD | SPI_LOCK_ON,
    .cs = SPI_CS,
};

const struct device *spi; /* SPI bus device, bound in spi_init() */

void spi_init()
{
    spi = device_get_binding("SPI_1");
    ....
}
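Once that is in place, any transfer made with spi_cfg should drive the CS GPIO low for the duration of the transaction. As a quick check, here is a rough sketch of a JEDEC-ID read (command 0x9F) reusing the spi and spi_cfg above; note that this particular command normally runs on a single data line, so SPI_LINES_SINGLE would be the usual choice for it rather than SPI_LINES_QUAD:

uint8_t tx_data[4] = { 0x9F, 0x00, 0x00, 0x00 }; /* RDID command + 3 dummy bytes */
uint8_t rx_data[4] = { 0 };

struct spi_buf tx_buf = { .buf = tx_data, .len = sizeof(tx_data) };
struct spi_buf rx_buf = { .buf = rx_data, .len = sizeof(rx_data) };
struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 };
struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 };

/* CS is asserted (low) before the first clock edge and released after the
 * transfer, with the 10 us delay configured in spi_cs above. */
int err = spi_transceive(spi, &spi_cfg, &tx, &rx);
if (err == 0) {
    printk("JEDEC ID: %02x %02x %02x\n", rx_data[1], rx_data[2], rx_data[3]);
}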
I have an Olimexino-STM32 with an STM32F103RBT6. I am using System Workbench for STM32 2.6 on Windows 10 x64 and STM32CubeMX 4.27.0.
In CubeMX I chose:
STM32F103RBT6
RCC HSE
USB
USB Device Communication Device Class
Debug Line: JTAG 4 pin
Fix clock speeds
Add LED1 on PA5 as GPIO Out
Add LED2 on PA1 as GPIO Out
Toolchain SW4STM
Stack size
In main.c, in the while loop:
/* Infinite loop */
/* USER CODE BEGIN WHILE */
while (1)
{
    HAL_GPIO_WritePin(LED2_GPIO_Port, LED2_Pin, GPIO_PIN_SET);
    HAL_Delay(500);
    /* USER CODE END WHILE */

    /* USER CODE BEGIN 3 */
    HAL_GPIO_WritePin(LED2_GPIO_Port, LED2_Pin, GPIO_PIN_RESET);
    HAL_Delay(500);
    uint8_t HiMsg[] = "hello\r\n";
    CDC_Transmit_FS(HiMsg, strlen((char *)HiMsg));
}
/* USER CODE END 3 */
in usb_cdc_if.c
static int8_t CDC_Receive_FS(uint8_t* Buf, uint32_t *Len)
{
    /* USER CODE BEGIN 6 */
    if (Buf[0] == '1')
    {
        HAL_GPIO_WritePin(LED1_GPIO_Port, LED1_Pin, GPIO_PIN_SET);
    }
    else
    {
        HAL_GPIO_WritePin(LED1_GPIO_Port, LED1_Pin, GPIO_PIN_RESET);
    }
    USBD_CDC_SetRxBuffer(&hUsbDeviceFS, &Buf[0]);
    USBD_CDC_ReceivePacket(&hUsbDeviceFS);
    return (USBD_OK);
    /* USER CODE END 6 */
}
I generate, compile and flash to the board. I can see the LED blinking in the main loop, and the blink speed looks correct, but I just can't get a device registered on Windows. I can flash the board with an example hex file from the distributor with the bootloader active, and there the COM port does get registered. So the USB port itself is fine, but somehow the generated code doesn't register the COM port.
Any ideas?
https://www.olimex.com/Products/Duino/STM32/OLIMEXINO-STM32/
Here is the test1.ioc file
#MicroXplorer Configuration settings - do not modify
File.Version=6
KeepUserPlacement=false
Mcu.Family=STM32F1
Mcu.IP0=NVIC
Mcu.IP1=RCC
Mcu.IP2=SYS
Mcu.IP3=USB
Mcu.IP4=USB_DEVICE
Mcu.IPNb=5
Mcu.Name=STM32F103R(8-B)Tx
Mcu.Package=LQFP64
Mcu.Pin0=PD0-OSC_IN
Mcu.Pin1=PD1-OSC_OUT
Mcu.Pin2=PA5
Mcu.Pin3=PA11
Mcu.Pin4=PA12
Mcu.Pin5=VP_SYS_VS_ND
Mcu.Pin6=VP_SYS_VS_Systick
Mcu.Pin7=VP_USB_DEVICE_VS_USB_DEVICE_CDC_FS
Mcu.PinsNb=8
Mcu.ThirdPartyNb=0
Mcu.UserConstants=
Mcu.UserName=STM32F103RBTx
MxCube.Version=4.27.0
MxDb.Version=DB.4.0.270
NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.PriorityGroup=NVIC_PRIORITYGROUP_4
NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.SysTick_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.USB_LP_CAN1_RX0_IRQn=true\:0\:0\:false\:false\:true\:false
NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false
PA11.Mode=Device
PA11.Signal=USB_DM
PA12.Mode=Device
PA12.Signal=USB_DP
PA5.GPIOParameters=GPIO_Label
PA5.GPIO_Label=LED1
PA5.Locked=true
PA5.Signal=GPIO_Output
PCC.Checker=false
PCC.Line=STM32F103
PCC.MCU=STM32F103R(8-B)Tx
PCC.PartNumber=STM32F103RBTx
PCC.Seq0=0
PCC.Series=STM32F1
PCC.Temperature=25
PCC.Vdd=3.3
PD0-OSC_IN.Mode=HSE-External-Oscillator
PD0-OSC_IN.Signal=RCC_OSC_IN
PD1-OSC_OUT.Mode=HSE-External-Oscillator
PD1-OSC_OUT.Signal=RCC_OSC_OUT
PinOutPanel.RotationAngle=0
RCC.ADCFreqValue=36000000
RCC.AHBFreq_Value=72000000
RCC.APB1CLKDivider=RCC_HCLK_DIV2
RCC.APB1Freq_Value=36000000
RCC.APB1TimFreq_Value=72000000
RCC.APB2Freq_Value=72000000
RCC.APB2TimFreq_Value=72000000
RCC.FCLKCortexFreq_Value=72000000
RCC.FamilyName=M
RCC.HCLKFreq_Value=72000000
RCC.IPParameters=ADCFreqValue,AHBFreq_Value,APB1CLKDivider,APB1Freq_Value,APB1TimFreq_Value,APB2Freq_Value,APB2TimFreq_Value,FCLKCortexFreq_Value,FamilyName,HCLKFreq_Value,MCOFreq_Value,PLLCLKFreq_Value,PLLMCOFreq_Value,PLLMUL,SYSCLKFreq_VALUE,SYSCLKSource,TimSysFreq_Value,USBFreq_Value,USBPrescaler,VCOOutput2Freq_Value
RCC.MCOFreq_Value=72000000
RCC.PLLCLKFreq_Value=72000000
RCC.PLLMCOFreq_Value=36000000
RCC.PLLMUL=RCC_PLL_MUL9
RCC.SYSCLKFreq_VALUE=72000000
RCC.SYSCLKSource=RCC_SYSCLKSOURCE_PLLCLK
RCC.TimSysFreq_Value=72000000
RCC.USBFreq_Value=48000000
RCC.USBPrescaler=RCC_USBCLKSOURCE_PLL_DIV1_5
RCC.VCOOutput2Freq_Value=8000000
USB_DEVICE.CLASS_NAME_FS=CDC
USB_DEVICE.IPParameters=VirtualMode,VirtualModeFS,CLASS_NAME_FS
USB_DEVICE.VirtualMode=Cdc
USB_DEVICE.VirtualModeFS=Cdc_FS
VP_SYS_VS_ND.Mode=No_Debug
VP_SYS_VS_ND.Signal=SYS_VS_ND
VP_SYS_VS_Systick.Mode=SysTick
VP_SYS_VS_Systick.Signal=SYS_VS_Systick
VP_USB_DEVICE_VS_USB_DEVICE_CDC_FS.Mode=CDC_FS
VP_USB_DEVICE_VS_USB_DEVICE_CDC_FS.Signal=USB_DEVICE_VS_USB_DEVICE_CDC_FS
board=custom
Problem solved: the Olimexino-STM32 has an output pin that connects and disconnects the USB, and I had not configured that output pin.
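For anyone hitting the same thing, configuring that disconnect line comes down to a few HAL calls. A minimal sketch, assuming the USB DISC line is on PC12 (as on Maple-style boards; check the Olimexino-STM32 schematic for the actual pin) and that driving it low enables the D+ pull-up so the host enumerates the device:

/* Assumption: USB disconnect line on PC12, low = connected. Verify against the schematic. */
__HAL_RCC_GPIOC_CLK_ENABLE();

GPIO_InitTypeDef gpio = {0};
gpio.Pin   = GPIO_PIN_12;
gpio.Mode  = GPIO_MODE_OUTPUT_PP;
gpio.Pull  = GPIO_NOPULL;
gpio.Speed = GPIO_SPEED_FREQ_LOW;
HAL_GPIO_Init(GPIOC, &gpio);

HAL_GPIO_WritePin(GPIOC, GPIO_PIN_12, GPIO_PIN_RESET); /* enable the D+ pull-up so the host (re)enumerates */

In CubeMX the same thing can be achieved by marking that pin as GPIO_Output and driving it from main().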
I have compiled U-Boot and a mainline kernel downloaded from kernel.org to run on a Raspberry Pi Model B. I have a problem using the GPIO interface. I am writing a module that manages two I/Os, one of which should generate an IRQ. When I call gpio_to_irq() or any other GPIO-related kernel API, the call always fails (return code -517 or -22). The same code works when run on the Raspberry Pi kernel downloaded from the GitHub RPi repository. The mainline kernel does, however, support the BCM2835, which is the SoC used on the RPi. Where is my approach wrong? Why do the GPIO API calls fail? If I manually find the IRQ number (virq) and request it with request_irq(), everything works fine even with the mainline kernel from kernel.org.
The mainline kernel version is 4.11, while the RPi kernel version from GitHub is 4.9.26. Here is the init function of the module:
static int __init hello_init(void)
{
    int result;
    int temp;

    printk(KERN_INFO "Hello world!\n");
    printk(KERN_INFO "%s\n", Version);

    /* Registering device */
    result = register_chrdev(memory_major, "Bisio", &memory_fops);
    if (result < 0)
    {
        printk(KERN_INFO "Memory Driver: Cannot Obtain Major Number %d\n", memory_major);
        return -1;
    }

    /* Allocating memory for the buffer */
    memory_buffer = kmalloc(MEMSIZE, GFP_KERNEL);
    if (!memory_buffer) {
        return -ENOMEM;
    }
    memset(memory_buffer, 0, MEMSIZE);

    result = gpio_request(23, "MyIO");
    printk(KERN_INFO "gpio_request: %d\n", result);
    result = gpio_direction_input(23);
    printk(KERN_INFO "gpio_direction_input: %d\n", result);
    result = gpio_to_irq(23);
    printk(KERN_INFO "gpio_to_irq: %d\n", result);

    return 0; // Non-zero return means that the module couldn't be loaded.
}
Doing an insmod of this module I get:
[ 109.792257] nothing: loading out-of-tree module taints kernel.
[ 109.820350] Hello world!
[ 109.829341] Driver Version 1.27 - 10/09/2015 - 10:20
[ 109.841344] gpio_request: -517
[ 109.850737] gpio_direction_input: 0
[ 109.860187] gpio_to_irq: -22
while doing the same with the RPi kernel (compiled with the same gcc cross-compiler, version 5.20, soft float) works correctly. Here is the output of the same init function:
[ 47.927565] nothing: loading out-of-tree module taints kernel.
[ 47.939771] Hello world!
[ 47.942333] Driver Version 1.27 - 10/09/2015 - 10:20
[ 47.947411] gpio_request: 0
[ 47.952798] gpio_direction_input: 0
[ 47.956314] gpio_to_irq: 183
What am I missing?
Any help will be appreciated.
Regards
Marco Bisio
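For reference, error code -517 is -EPROBE_DEFER (the GPIO provider, i.e. the pinctrl/GPIO controller driver, had not been probed when gpio_request() ran), and -22 is -EINVAL. A small sketch of making that explicit in the init function above, purely as a diagnostic aid under that assumption:

result = gpio_request(23, "MyIO");
if (result == -EPROBE_DEFER) {
    /* The GPIO controller driver is not bound yet, so GPIO 23 cannot be
     * requested at this point; check that the BCM2835 pinctrl/GPIO driver
     * is enabled and probed in the mainline configuration. */
    printk(KERN_INFO "GPIO controller not ready (-EPROBE_DEFER)\n");
    return result;
} else if (result < 0) {
    printk(KERN_INFO "gpio_request failed: %d\n", result);
    return result;
}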