I have a client/server lwIP program and I want to use multicast, so I enabled the IGMP support and made the following settings:
In the .ioc file:
Enable LWIP_IGMP
Enable LWIP_MULTICAST_TX_OPTION
Enable LWIP_IP4
In ethernet.c:
netif->flags |= NETIF_FLAG_IGMP;
In stm32f7xx_hal_eth.c (ETH_MACDMAConfig):
macinit.PromiscuousMode = ETH_PROMISCUOUS_MODE_ENABLE;
macinit.MulticastFramesFilter = ETH_MULTICASTFRAMESFILTER_NONE;
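For reference, I believe these CubeMX options correspond to the following lwipopts.h defines (just a sketch; the actual lwIP macro names are LWIP_IPV4, LWIP_IGMP and LWIP_MULTICAST_TX_OPTIONS):

/* lwipopts.h -- rough equivalent of the CubeMX settings above (sketch) */
#define LWIP_IPV4                  1   /* IPv4 support */
#define LWIP_IGMP                  1   /* IGMP, needed for multicast group membership */
#define LWIP_MULTICAST_TX_OPTIONS  1   /* per-PCB multicast TTL / outgoing netif */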
and I implemented the code like this:
void UDP_Multicast_init(void *arg)
{
    struct ip4_addr ipgroup, localIP;
    struct udp_pcb *upcb;
    char msg[] = "hello";
    struct pbuf *p;

    p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
    memcpy(p->payload, msg, sizeof(msg));

    IP4_ADDR(&ipgroup, 224, 224, 0, 1);   // Multicast group address
    IP4_ADDR(&localIP, 192, 168, 1, 99);  // Interface IP address

#if LWIP_IGMP
    igmp_joingroup((ip4_addr_t *)(&localIP), (ip4_addr_t *)(&ipgroup));
#endif

    upcb = udp_new();
    MulticastStart();                     // binds the connection to port 10
    udp_recv(upcb, UDP_callback, NULL);
    udp_sendto(upcb, p, &ipgroup, 10);
}
void UDP_callback(void *arg, struct udp_pcb *upcb, struct pbuf *p,
                  const ip_addr_t *addr, u16_t port)
{
    printf("test");
}
In Wireshark I can see that the local IP successfully joins the multicast group and sends data to the multicast address,
but the callback function is never executed.
Is there something missing?
Thanks for the response.
I'm trying to make an interrupt work for a device in QEMU. The machine name is ab21q, a modified version of the arm64 virt machine, and the device name is ab21q_axpu.
Here is some of the relevant code. I used pl011.c as a reference. (I temporarily switched back to qemu-5.1.0 for this test.)
==== hw/arm/ab21q.c
machab21q_init(MachineState *machine)
{
    .... skip ....
    create_ab21q_axpu_device(vms, sysmem); // ab21q-axpu test
    ....
}
static void create_ab21q_axpu_device(const Ab21qMachineState *vms, MemoryRegion *mem)
{
    char *nodename;
    hwaddr base = vms->memmap[AB21Q_AXPU].base;
    hwaddr size = vms->memmap[AB21Q_AXPU].size;
    int irq = vms->irqmap[AB21Q_AXPU];
    const char compat[] = "ab21q-axpu";
    DeviceState *dev = qdev_new(TYPE_AB21Q_AXPU);
    SysBusDevice *s = SYS_BUS_DEVICE(dev);

    //sysbus_create_simple("ab21q-axpu", base, qdev_get_gpio_in(vms->gic, irq));
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0));
    sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq));

    nodename = g_strdup_printf("/ab21q_axpu#%" PRIx64, base);
    qemu_fdt_add_subnode(vms->fdt, nodename);
    qemu_fdt_setprop(vms->fdt, nodename, "compatible", compat, sizeof(compat));
    qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg", 2, base, 2, size);
    qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
                           GIC_FDT_IRQ_TYPE_SPI, irq,
                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
    qemu_fdt_setprop_cell(vms->fdt, nodename, "interrupt-parent", vms->gic_phandle);
    g_free(nodename);
}
==== hw/misc/ab21q_axpu.c
static void ab21q_axpu_init(Object *obj)
{
    Ab21qAxpuState *s = AB21Q_AXPU(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    int i;

    memory_region_init_io(&s->iomem, OBJECT(s), &ab21q_axpu_ops, s,
                          TYPE_AB21Q_AXPU, 0x200000*64);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
    s->init = 0;
    s->int_flag = 0;
    s->status = 0;
    s->id = CHIP_ID;
}
static void ab21q_axpu_realize(DeviceState *d, Error **errp)
{
    Ab21qAxpuState *s = AB21Q_AXPU(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    if (qemu_irq_is_connected(s->irq)) {
        printf("axpu irq connected!\n");
    } else {
        printf("axpu irq not connected!\n");
    }
}
static void ab21q_axpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = ab21q_axpu_realize;
}

static void ab21q_axpu_set_irq(Ab21qAxpuState *s, int irq)
{
    s->status = irq;
    qemu_set_irq(s->irq, 1);
}
static void ab21q_axpu_write(void *opaque, hwaddr offset, uint64_t value,
                             unsigned size)
{
    Ab21qAxpuState *s = (Ab21qAxpuState *)opaque;
    .... skip ...
    switch (offset) {
    case TRIGGER_RUN:
        ....
        if (((uint64_t *)(s->ioctl_arg + *host_virt_offset_p))[0] == 0x1604) {
            s->int_flag = 1;
            ab21q_axpu_set_irq(s, INT_AXPU_RUN_FINISHED);
        }
The machine and the device work (the device is actually a shared library that QEMU links to), except that the interrupt does not fire even though QEMU calls qemu_set_irq. I checked with qemu_irq_is_connected in the realize function: the pl011 case prints pl011 irq connected!, but my device prints axpu irq not connected!. So it does not look like a device-driver problem, but a problem in the QEMU model itself.
Could anyone spot what is missing in the code above? Should I add something to the ACPI tables (in ab21q-build-acpi.c)?
I tried adding these lines in hw/arm/ab21q-build-acpi.c, in the function build_dsdt:
acpi_dsdt_add_axpu(scope, &memmap[AB21Q_AXPU],
                   (irqmap[AB21Q_AXPU] + ARM_SPI_BASE));
The acpi_dsdt_add_axpu function is:
static void acpi_dsdt_add_axpu(Aml *scope, const MemMapEntry *uart_memmap,
                               uint32_t irq)
{
    Aml *dev = aml_device("AXPU");
    aml_append(dev, aml_name_decl("_HID", aml_string("AXPU0011")));
    aml_append(dev, aml_name_decl("_UID", aml_int(0)));

    Aml *crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(uart_memmap->base,
                                       uart_memmap->size, AML_READ_WRITE));
    aml_append(crs,
               aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                             AML_EXCLUSIVE, &irq, 1));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}
Inside the virtual machine (Ubuntu 20.04) I ran acpidump and converted the output to .dsl files. The dsdt.dsl contains this entry:
Device (AXPU)
{
Name (_HID, "AXPU0011") // _HID: Hardware ID
Name (_UID, Zero) // _UID: Unique ID
Name (_CRS, ResourceTemplate () // _CRS: Current Resource Settings
{
Memory32Fixed (ReadWrite,
0x09100000, // Address Base
0x00080000, // Address Length
)
Interrupt (ResourceConsumer, Level, ActiveHigh, Exclusive, ,, )
{
0x000000D0,
}
})
}
I'm not sure what I should fix in the ACPI table.
Any comment or advice would be deeply appreciated.
Thank you!
Chan Kim
So I added some prints. This is the result.
create_uart called!
pl011_init called!
pl011_realize called!
pl011 irq not connected!
now calling sysbus_connect_irq for pl011..
now passed sysbus_connect_irq for pl011..
pl011 irq connected!
create_ab21q_axpu_device called!
ab21q_axpu_init called!
ab21q_axpu_realize called!
axpu irq not connected!
now calling sysbus_connect_irq for ab21q_axpu..
now passed sysbus_connect_irq for ab21q_axpu..
ab21q_axpu irq connected!
So, as far as the IRQ connection goes, pl011 and ab21q_axpu behave the same.
For pl011, the IRQ was not connected before sysbus_connect_irq either. In the code above I had used qemu_irq_is_connected(s->irq) by mistake and it returned true (a false positive); I changed it to qemu_irq_is_connected(s->irq[0]), because pl011 has 6 IRQ outputs, and then it returned false before sysbus_connect_irq. After sysbus_connect_irq, both ab21q_axpu's and pl011's IRQs show as connected.
And of course I used qemu_irq_is_connected(AB21Q_AXPU(dev)->irq) or qemu_irq_is_connected(PL011(dev)->irq[0]) to check the IRQ connection inside the xxx_create functions.
ADD: I later found that request_irq returned -EINVAL, so the interrupt was not registered correctly in the driver.
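For what it's worth, a -EINVAL from request_irq() in a platform driver usually means the IRQ number passed in is not a valid mapped Linux IRQ; the number should come from platform_get_irq() rather than the raw SPI number. A minimal sketch of the probe-time code (axpu_isr, pdev and priv are placeholder names, not from my driver):

/* Hypothetical guest-side probe snippet (sketch only). */
int irq = platform_get_irq(pdev, 0);   /* mapped Linux IRQ, not the raw SPI number */
if (irq < 0)
        return irq;

int ret = devm_request_irq(&pdev->dev, irq, axpu_isr, 0, "ab21q-axpu", priv);
if (ret)
        dev_err(&pdev->dev, "request_irq failed: %d\n", ret);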
I'm trying to send data between my computer and an STM32H745 over Ethernet using LwIP and UDP. I have successfully configured the board and right now I can send data from the board to a Python script running on the computer. However, I don't understand how udp_recv works, or how to receive data with UDP on LwIP in general, and I can't find examples that do just that. Where does the received data end up? Should I even use udp_recv?
In the main loop I run MX_LWIP_Process, which runs ethernetif_input, which somehow handles the received data, but I don't understand where it puts it.
Below is the main code, just for reference.
const char* message = "a";
HAL_GPIO_TogglePin(GPIOE, GPIO_PIN_1); // orange
ip_addr_t PC_IPADDR;
IP_ADDR4(&PC_IPADDR, 192, 168, 1, 200);
u16_t port = 8000;
struct udp_pcb* my_udp = udp_new();
struct pbuf* udp_buffer = NULL;

/* Infinite loop */
for (;;)
{
    MX_LWIP_Process();
    HAL_GPIO_TogglePin(GPIOE, GPIO_PIN_1); // orange
    HAL_Delay(1000);
    udp_buffer = pbuf_alloc(PBUF_TRANSPORT, strlen(message), PBUF_RAM);
    if (udp_buffer != NULL)
    {
        memcpy(udp_buffer->payload, message, strlen(message));
        udp_sendto(my_udp, udp_buffer, &PC_IPADDR, port);
        pbuf_free(udp_buffer);
    }
    //udp_recv (struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg)
}
udp_recv() does not actually receive UDP datagrams (despite its name). It registers a callback function that will then be called by MX_LWIP_Process() when a datagram has been buffered. It would better be called udp_set_recv_callback(), but it is what it is.
To that end you should call it once before your executive loop:
udp_bind( my_udp, IP_ADDR_ANY, port ) ;
udp_recv( my_udp, udp_receive_callback, NULL ) ;

/* Infinite loop */
for (;; )
{
    // Run the CubeMX LwIP stack
    MX_LWIP_Process() ;
    ...
}
Where udp_receive_callback is a function that will be invoked on receipt of a datagram:
void udp_receive_callback( void* arg,             // User argument - udp_recv `arg` parameter
                           struct udp_pcb* upcb,  // Receiving Protocol Control Block
                           struct pbuf* p,        // Pointer to Datagram
                           const ip_addr_t* addr, // Address of sender
                           u16_t port )           // Sender port
{
    // Process datagram here (non-blocking code)
    ...

    // Must free receive pbuf before return
    pbuf_free( p ) ;
}
Examples include:
https://gist.github.com/iwanbk/1399729
https://github.com/STMicroelectronics/STM32CubeF2/blob/master/Projects/STM322xG_EVAL/Applications/LwIP/LwIP_UDP_Echo_Client/Src/udp_echoclient.c
Documentation can be found at https://www.nongnu.org/lwip/2_0_x/group__udp__raw.html
Environment: Windows Socket Programming using VC++ 2010
GVCP : GigE Vision Control Protocol
GVCP = UDP + (GVCP header data + payload data), so it is basically just UDP on top.
To detect a GigE sensor (camera), I first need to broadcast a GVCP packet (containing the GVCP payload data) using the broadcast address 255.255.255.255,
but I am only able to broadcast to 192.168.1.255 (as seen in Wireshark). When I change the broadcast address to 255.255.255.255, nothing is visible in Wireshark or on the other machine.
So the problem is that I am not able to broadcast to 255.255.255.255 using UDP/WinSock.
I am now able to broadcast the GVCP packet; it was just a socket-creation error. The corrected code is below.
//---------------------DATA SENDER------------------------------
struct sockaddr_in Sender_addr;
int Sender_addrlen = sizeof(Sender_addr);
Sender_addr.sin_family = AF_INET;
Sender_addr.sin_port = htons(CAMPORT); //BROADCAST_PORT);
Sender_addr.sin_addr.s_addr = inet_addr("255.255.255.255"); // Broadcast IP here
//---------------------DATA RECEIVER----------------------------
struct sockaddr_in Recv_addr;
int Recv_addrlen = sizeof(Recv_addr);
Recv_addr.sin_family = AF_INET;
Recv_addr.sin_port = htons(PCPORT);
Recv_addr.sin_addr.s_addr = INADDR_ANY;

if (bind(sock, (sockaddr*)&Recv_addr, sizeof(Recv_addr)) < 0)
{
    perror("bind");
    _getch();
    closesocket(sock);
}
// and then send the command for the GVCP packet (the GVCP packet structure is):
TxBuff[0] = 0x42;
TxBuff[1] = 0x01;
TxBuff[2] = 0x00;
TxBuff[3] = 0x02;
TxBuff[4] = 0x00;
TxBuff[5] = 0x00;
TxBuff[6] = 0x00;
TxBuff[7] = 0x02;

if (sendto(sock, TxBuff, TxBuffSize, 0,
           (struct sockaddr *)&Sender_addr, sizeof(Sender_addr)) < 0)
{
    perror("send: error ");
    _getch();
    closesocket(sock);
}
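For completeness, sending to 255.255.255.255 with WinSock requires SO_BROADCAST to be enabled on the socket (otherwise sendto() fails with WSAEACCES), so the socket creation looks roughly like this (a sketch using the names from the code above):

// Sketch of the sending-socket setup: without SO_BROADCAST,
// sendto() to 255.255.255.255 is rejected by WinSock.
SOCKET sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (sock == INVALID_SOCKET)
{
    perror("socket");
}
BOOL bBroadcast = TRUE;
if (setsockopt(sock, SOL_SOCKET, SO_BROADCAST,
               (const char *)&bBroadcast, sizeof(bBroadcast)) == SOCKET_ERROR)
{
    perror("setsockopt(SO_BROADCAST)");
}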
I am fairly new to the lwIP stack. I'm trying to send some data over UDP from my development board to my PC, using an Ethernet cable between the two.
I gave an IP address to my server (the source board), 192.168.1.75:88, and the IP address of my computer is 192.168.1.2:90. With this configuration, when I run the program I cannot sniff anything with Wireshark; there is no UDP packet exchange at all. But when I change the destination address to 255.255.255.255 or 0.0.0.0, I can sniff some packets.
Why can't I send UDP packets to the IP address that I want?
Main.c
int main(void)
{
#define dst_port 88
#define src_port 90

#ifdef SERIAL_DEBUG
    DebugComPort_Init();
#endif

    LCD_LED_Init();
    ETH_BSP_Config();
    LwIP_Init();

    IP4_ADDR(&dstaddr, 0, 0, 0, 0);
    IP4_ADDR(&srcaddr, 192, 168, 1, 75);

    pcb = udp_new();
    udp_bind(pcb, &dstaddr, src_port);
    udp_recv(pcb, RecvUTPCallBack, NULL);
    udp_connect(pcb, &dstaddr, dst_port);

#ifdef USE_DHCP
    /* Start DHCPClient */
    xTaskCreate(LwIP_DHCP_task, "DHCPClient", configMINIMAL_STACK_SIZE * 2, NULL, DHCP_TASK_PRIO, NULL);
#endif

    /* Start toogleLed4 task : Toggle LED4 every 250ms */
    xTaskCreate(ToggleLed4, "LED4", configMINIMAL_STACK_SIZE, NULL, LED_TASK_PRIO, NULL);
    xTaskCreate(SendUDP, "UDP", configMINIMAL_STACK_SIZE, NULL, LED_TASK_PRIO, NULL);

    /* Start scheduler */
    vTaskStartScheduler();

    for( ;; );
}
SendUDP Task
void SendUDP(void *pvParameters)
{
    while(1)
    {
        pcb = udp_new();
        udp_bind(pcb, &dstaddr, src_port);
        udp_recv(pcb, RecvUTPCallBack, NULL);
        udp_connect(pcb, &dstaddr, dst_port);

        pb = pbuf_alloc(PBUF_TRANSPORT, sizeof((str)), PBUF_REF);
        pb->payload = str;
        pb->len = pb->tot_len = sizeof((str));

        udp_sendto(pcb, pb, &dstaddr, dst_port);
        udp_disconnect(pcb);
        udp_remove(pcb);
        pbuf_free(pb);

        vTaskDelay(1000);
    }
}
I figured this out about a week ago, but I couldn't post the answer here until now.
First of all, there is an IP address defined in main.h, like
/*Static IP ADDRESS*/
#define IP_ADDR0 192
#define IP_ADDR1 168
#define IP_ADDR2 1
#define IP_ADDR3 15
and this configuration is used in netconf.h:
IP4_ADDR(&ipaddr, IP_ADDR0, IP_ADDR1, IP_ADDR2, IP_ADDR3);
That's why the server's IP address was always 192.168.1.15.
Second, I switched from the raw API to the netconn API, which is much easier to use. Below is my new SendwithUDP function, which works perfectly.
void SendwithUDP(uint16_t *veri, uint8_t length)
{
    while(1)
    {
        if(((EventFlags.udp) && (1<<0)) == (1<<0))
        {
            STM_EVAL_LEDToggle(LED3);

            sendconn = netconn_new( NETCONN_UDP );
            netconn_bind(sendconn, IP_ADDR_ANY, src_port );
            netconn_connect(sendconn, &clientAddr, 150);

            sendbuf = netbuf_new();
            data = netbuf_alloc(sendbuf, 2*length);
            memcpy(data, veri, 2*length);
            netconn_send(sendconn, sendbuf);

            netbuf_free(sendbuf);
            netbuf_delete(sendbuf);
            netconn_disconnect(sendconn);
            netconn_delete(sendconn);

            vTaskDelay(10);
        }
    }
}
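For the receive direction, a netconn-based counterpart could look roughly like this (a sketch only; it assumes an lwIP version where netconn_recv() returns err_t, and RecvTask is just an illustrative name, with src_port as defined in main.c):

void RecvTask(void *pvParameters)
{
    struct netconn *conn = netconn_new(NETCONN_UDP);
    struct netbuf *buf;
    void *data;
    u16_t len;

    netconn_bind(conn, IP_ADDR_ANY, src_port);       /* listen on the local port */

    while(1)
    {
        if(netconn_recv(conn, &buf) == ERR_OK)       /* blocks until a datagram arrives */
        {
            netbuf_data(buf, &data, &len);           /* pointer to and length of the payload */
            /* process data[0..len) here */
            netbuf_delete(buf);
        }
    }
}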
Below you can see my code, which implements a pretty basic UDP sender in C++ with Winsock. The thing is that no matter how many times I run the code, the socket (ListenSocket) gets bound to a different UDP port. Is there any specific reason for this? Am I making a mistake in my code?
Thanks.
#include <cstdlib>
#include <iostream>
#include <windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <stdio.h>
using namespace std;
int main(int argc, char *argv[])
{
WSADATA wsaData;
SOCKADDR_IN myAddress;
SOCKADDR_IN targetAddress;
int myPort = 60888;
const char *myIP = "192.168.0.1";
int remotePort = 2048;
const char *remoteIP = "192.168.0.2";
SOCKET ListenSocket = INVALID_SOCKET;
SOCKET SendSocket = INVALID_SOCKET;
SOCKET acceptSocket;
char cBuffer[1024] = "Test Buffer";
int nBytesSent = 0;
int nBufSize = strlen(cBuffer);
int iResult;
// Initialize Winsock
if( WSAStartup( MAKEWORD(2, 2), &wsaData ) != NO_ERROR )
{
cerr<<"Socket Initialization: Error with WSAStartup\n";
system("pause");
WSACleanup();
exit(10);
}
ListenSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
SendSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (ListenSocket == INVALID_SOCKET || SendSocket == INVALID_SOCKET)
{
cerr<<"Socket Initialization: Error creating socket"<<endl;
system("pause");
WSACleanup();
exit(11);
}
//bind
myAddress.sin_family = AF_INET;
myAddress.sin_addr.s_addr = inet_addr(myIP);
myAddress.sin_port = htons(myPort);
targetAddress.sin_family = AF_INET;
targetAddress.sin_addr.s_addr = inet_addr(remoteIP);
targetAddress.sin_port = htons(remotePort);
if(bind(ListenSocket, (SOCKADDR*) &myAddress, sizeof(myAddress)) == SOCKET_ERROR)
{
cerr<<"ServerSocket: Failed to connect\n";
system("pause");
WSACleanup();
exit(14);
}
else
printf("Server: bind() is OK.\n");
nBytesSent = sendto(SendSocket, cBuffer, nBufSize, 0,
(SOCKADDR *) &targetAddress,
sizeof(SOCKADDR_IN));
printf("Everything is ok\n");
system("PAUSE");
closesocket(ListenSocket);
closesocket(SendSocket);
return EXIT_SUCCESS;
}
EDIT: Maybe I was not clear enough. What I do with this code is send some data to a remote PC, but the requirement is that the UDP segments appear to originate from a specific port. How can this be done? Is what I'm doing here wrong? Now that I think of it, I guess it is indeed wrong. SendSocket and ListenSocket don't have any connection to each other, correct? So how can I make the UDP segments originate from a specific UDP port? Thanks!
You are not calling bind() on SendSocket before sending data with it, so WinSock is free to bind that socket to whatever random local IP/Port it needs to. If you have to send data with a specific source IP/Port every time, you have to bind() to that IP/Port first. If that local IP/Port is the same pair you are binding ListenSocket to, then you don't need to use two separate sockets to begin with. You can send data with the same socket that is listening for incoming data.
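A minimal sketch with the variables from the question (since myAddress is the same IP/port pair that ListenSocket is already bound to, the simplest option is to send from ListenSocket itself):

// Sketch: ListenSocket is already bound to 192.168.0.1:60888, so datagrams sent
// through it carry that source port; SendSocket is then unnecessary.
nBytesSent = sendto(ListenSocket, cBuffer, nBufSize, 0,
                    (SOCKADDR *) &targetAddress, sizeof(SOCKADDR_IN));
if (nBytesSent == SOCKET_ERROR)
{
    printf("sendto() failed with error %d\n", WSAGetLastError());
}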