[gem][minip][pktbuf] Improvements for TX scatter gather functionality

- GEM now supports asynchronous scatter-gather queuing and properly handles
  pktbuf ownership
- General stack cleanups in Minip
- pktbufs now grab buffers from the user, or from a preallocated pool
This commit is contained in:
Christopher Anderson
2014-12-15 13:59:17 -08:00
parent 94cf843de1
commit d352ac1d2b
12 changed files with 540 additions and 419 deletions

View File

@@ -27,6 +27,8 @@
#include <string.h>
#include <malloc.h>
#include <stdio.h>
#include <kernel/thread.h>
#include <kernel/mutex.h>
#include <trace.h>
typedef union {
@@ -42,6 +44,8 @@ typedef struct {
uint8_t mac[6];
} arp_entry_t;
static mutex_t arp_mutex = MUTEX_INITIAL_VALUE(arp_mutex);
void arp_cache_init(void)
{
list_initialize(&arp_list);
@@ -71,6 +75,7 @@ void arp_cache_update(uint32_t addr, const uint8_t mac[6])
/* If the entry is in the cache update the address and move
* it to head */
mutex_acquire(&arp_mutex);
list_for_every_entry(&arp_list, arp, arp_entry_t, node) {
if (arp->addr == addr) {
arp->addr = addr;
@@ -86,30 +91,38 @@ void arp_cache_update(uint32_t addr, const uint8_t mac[6])
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
arp = malloc(sizeof(arp_entry_t));
if (arp == NULL) {
return;
goto err;
}
arp->addr = addr;
memcpy(arp->mac, mac, sizeof(arp->mac));
list_add_head(&arp_list, &arp->node);
}
err:
mutex_release(&arp_mutex);
return;
}
/* Looks up and returns a MAC address based on the provided ip addr */
uint8_t *arp_cache_lookup(uint32_t addr)
{
arp_entry_t *arp;
arp_entry_t *arp = NULL;
uint8_t *ret = NULL;
/* If the entry is in the cache update the address and move
* it to head */
mutex_acquire(&arp_mutex);
list_for_every_entry(&arp_list, arp, arp_entry_t, node) {
if (arp->addr == addr) {
mru_update(&arp->node);
return arp->mac;
ret = arp->mac;
break;
}
}
mutex_release(&arp_mutex);
return NULL;
return ret;
}
void arp_cache_dump(void)

View File

@@ -23,6 +23,7 @@
#include "minip-internal.h"
#include <err.h>
#include <platform/gem.h>
#include <platform.h>
#include <stdio.h>
@@ -54,6 +55,8 @@ typedef struct dhcp_msg {
u8 options[0];
} dhcp_msg_t;
udp_socket_t *dhcp_udp_handle;
#define DHCP_FLAG_BROADCAST 0x8000
#define DHCP_REQUEST 1
@@ -124,8 +127,11 @@ static void dhcp_discover(u32 xid) {
*opt++ = OPT_DONE;
minip_udp_send(&s.msg, sizeof(dhcp_msg_t) + (opt - s.opt),
IPV4_BCAST, DHCP_SERVER_PORT, DHCP_CLIENT_PORT);
udp_send(&s.msg, sizeof(dhcp_msg_t) + (opt - s.opt), dhcp_udp_handle);
status_t ret = udp_send(&s.msg, sizeof(dhcp_msg_t) + (opt - s.opt), dhcp_udp_handle);
if (ret != NO_ERROR) {
printf("DHCP_DISCOVER failed: %d\n", ret);
}
}
static void dhcp_request(u32 xid, u32 server, u32 reqip) {
@@ -167,8 +173,10 @@ static void dhcp_request(u32 xid, u32 server, u32 reqip) {
*opt++ = OPT_DONE;
minip_udp_send(&s.msg, sizeof(dhcp_msg_t) + (opt - s.opt),
IPV4_BCAST, DHCP_SERVER_PORT, DHCP_CLIENT_PORT);
status_t ret = udp_send(&s.msg, sizeof(dhcp_msg_t) + (opt - s.opt), dhcp_udp_handle);
if (ret != NO_ERROR) {
printf("DHCP_REQUEST failed: %d\n", ret);
}
}
static void dhcp_cb(void *data, size_t sz, uint32_t srcip, uint16_t srcport, void *arg) {
@@ -276,6 +284,9 @@ void minip_init_dhcp(tx_func_t tx_func, void *tx_arg) {
minip_init(tx_func, tx_arg, IPV4_NONE, IPV4_NONE, IPV4_NONE);
int ret = udp_open(IPV4_BCAST, DHCP_CLIENT_PORT, DHCP_SERVER_PORT, &dhcp_udp_handle);
printf("dhcp opened udp: %d\n", ret);
minip_udp_listen(DHCP_CLIENT_PORT, dhcp_cb, NULL);
dhcp_thr = thread_create("dhcp", dhcp_thread, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);

View File

@@ -32,6 +32,7 @@
#define IPV4(a,b,c,d) (((a)&0xFF)|(((b)&0xFF)<<8)|(((c)&0xFF)<<16)|(((d)&0xFF)<<24))
#define IPV4_SPLIT(a) (a & 0xFF), ((a >> 8) & 0xFF), ((a >> 16) & 0xFF), ((a >> 24) & 0xFF)
#define IPV4_PACK(a) (a[3] << 24 | a[2] << 16 | a[1] << 8 | a[0])
#define IPV4_BCAST (0xFFFFFFFF)
#define IPV4_NONE (0)
@@ -59,23 +60,17 @@ void minip_set_ipaddr(const uint32_t addr);
void minip_set_hostname(const char *name);
const char *minip_get_hostname(void);
/* udp socket interface */
typedef struct {
uint32_t addr;
uint16_t port;
} minip_fd_t;
minip_fd_t *minip_open(uint32_t addr, uint16_t port);
void minip_close(minip_fd_t *fd);
int send(minip_fd_t *fd, void *buf, size_t len, int flags);
/* raw udp transmit */
int minip_udp_send(const void *data, size_t len,
uint32_t dstaddr, uint16_t dstport,
uint16_t srcport);
/* install udp listener */
int minip_udp_listen(uint16_t port, udp_callback_t rx_handler, void *arg);
uint32_t host;
uint16_t sport;
uint16_t dport;
uint8_t *mac;
} udp_socket_t;
status_t udp_open(uint32_t host, uint16_t sport, uint16_t dport, udp_socket_t **handle);
status_t udp_close(udp_socket_t *handle);
status_t udp_send(void *buf, size_t len, udp_socket_t *handle);
int minip_udp_listen(uint16_t port, udp_callback_t cb, void *arg);
/* tcp */
typedef struct tcp_socket tcp_socket_t;

View File

@@ -27,22 +27,37 @@
#include <sys/types.h>
#include <list.h>
#define PKTBUF_SIZE 2048
#define PKTBUF_BUF_SIZE 1984
/* 2048-byte slab minus 16 bytes of metadata in pktbuf_buf (NOTE(review): the
 * comment said PAGE_SIZE, but PKTBUF_SIZE is 2048-16 -- confirm intended slab size) */
#define PKTBUF_SIZE 2032
#define PKTBUF_MAX_DATA 1536
#define PKTBUF_MAX_HDR (PKTBUF_BUF_SIZE - PKTBUF_MAX_DATA)
#define PKTBUF_MAX_HDR (PKTBUF_SIZE - PKTBUF_MAX_DATA)
typedef struct pktbuf {
struct list_node list;
u32 magic;
u8 *data;
u32 dlen;
u32 phys_base;
u32 rsv0;
u32 rsv1;
u32 rsv2;
u8 buffer[PKTBUF_BUF_SIZE];
u32 id;
struct list_node list;
bool managed;
bool eof;
u8 *buffer;
} pktbuf_t;
/* metadata is stored at the end of the structure to catch overflows of
* the packet data itself */
#define PKTBUF_HDR_MAGIC 'PKTH'
#define PKTBUF_BUF_MAGIC 'PKTB'
/* Backing storage slab handed out to managed pktbufs. The metadata lives
 * after the data area so a data overrun corrupts the magic (caught at
 * alloc time) rather than the free-list linkage. */
typedef struct pktbuf_buf {
uint8_t data[PKTBUF_SIZE];
uint32_t magic;         /* PKTBUF_BUF_MAGIC while the slab is valid */
uintptr_t phys_addr;    /* physical address of data[0] */
struct list_node list;  /* linkage on the buffer free pool */
} pktbuf_buf_t;
/* Return the physical address of the packet's current data pointer
 * (phys_base corresponds to the start of buffer, so add the data offset) */
static inline u32 pktbuf_data_phys(pktbuf_t *p) {
return p->phys_base + (p->data - p->buffer);
}
@@ -54,7 +69,7 @@ static inline u32 pktbuf_avail_head(pktbuf_t *p) {
// number of bytes available for _append or _append_data
static inline u32 pktbuf_avail_tail(pktbuf_t *p) {
return PKTBUF_BUF_SIZE - (p->data - p->buffer) - p->dlen;
return PKTBUF_SIZE - (p->data - p->buffer) - p->dlen;
}
// allocate packet buffer from buffer pool
@@ -71,7 +86,7 @@ void pktbuf_append_data(pktbuf_t *p, const void *data, size_t sz);
void *pktbuf_append(pktbuf_t *p, size_t sz);
// grow the front of the buffer and return a pointer
// to the new start of packet
// to the new start of packet
void *pktbuf_prepend(pktbuf_t *p, size_t sz);
// shrink the buffer by discarding the first sz bytes
@@ -85,8 +100,12 @@ void pktbuf_consume_tail(pktbuf_t *p, size_t sz);
// create a new packet buffer from raw memory and add
// it to the free pool
void pktbuf_create(void *ptr, u32 phys, size_t size);
void pktbuf_create(void *ptr, size_t size);
// Carve backing buffers for pktbufs out of the memory region described by ptr / size
void pktbuf_create_bufs(void *ptr, size_t size);
void pktbuf_dump(pktbuf_t *p);
#endif
// vim: set noexpandtab:

View File

@@ -28,12 +28,33 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <platform.h>
#include <kernel/timer.h>
#include <err.h>
#if WITH_LIB_CONSOLE
/* Parse a dotted-quad IPv4 string ("a.b.c.d") of length len into a
 * little-endian packed uint32 (first octet in the low byte, i.e. network
 * byte order on this LE target). Parsing stops at a NUL, a non-digit, or
 * after the fourth octet, so malformed input can no longer index past ip[3]
 * (the previous version also used a uint8_t position that overflowed for
 * len > 255). Out-of-range octets are not rejected; they wrap mod 256. */
uint32_t str_ip_to_int(const char *s, size_t len)
{
    uint8_t ip[4] = { 0, 0, 0, 0 };
    size_t pos = 0;
    unsigned int octet = 0;

    while (pos < len && s[pos] != '\0') {
        char c = s[pos];
        if (c == '.') {
            if (++octet > 3) {
                break;  /* too many dots: stop rather than overflow ip[] */
            }
        } else if (c >= '0' && c <= '9') {
            ip[octet] = (uint8_t)(ip[octet] * 10 + (c - '0'));
        } else {
            break;      /* junk character terminates parsing */
        }
        pos++;
    }

    /* equivalent to IPV4_PACK(ip) */
    return (uint32_t)ip[3] << 24 | (uint32_t)ip[2] << 16 |
           (uint32_t)ip[1] << 8 | ip[0];
}
static int cmd_minip(int argc, const cmd_args *argv)
{
static minip_fd_t fd = {IPV4(192, 168, 1, 1), 65456};
if (argc == 1) {
minip_usage:
printf("minip commands\n");
@@ -48,49 +69,6 @@ minip_usage:
arp_cache_dump();
break;
case 'c':
if (argc < 4)
goto minip_usage;
int i = 0, pos = 0, len = strlen(argv[2].str);
memset(&fd, 0, sizeof(fd));
uint8_t ip[4] = { 0, 0, 0, 0 };
while (pos < len) {
char c = argv[2].str[pos];
if (c == '.') {
i++;
} else {
ip[i] *= 10;
ip[i] += c - '0';
}
pos++;
}
memcpy(&fd.addr, ip, 4);
fd.port = argv[3].u;
if (arp_cache_lookup(fd.addr) == NULL) {
send_arp_request(fd.addr);
}
printf("new config: %u.%u.%u.%u:%u\n",
ip[0], ip[1], ip[2], ip[3], fd.port);
break;
case 'l': {
uint32_t buf[256];
uint32_t wait = 0;
memset(buf, 0x00, sizeof(buf));
wait = argv[2].u;
while (1) {
send(&fd, buf, sizeof(buf), 0);
if (wait > 0) {
thread_sleep(wait);
}
}
}
break;
case 's': {
uint32_t ipaddr = minip_get_ipaddr();
@@ -99,25 +77,41 @@ minip_usage:
}
break;
case 't': {
uint8_t buf[256];
uint32_t c = 1;
if (argc > 2) {
c = argv[2].u;
uint8_t buf[1470];
uint32_t cnt = 1;
uint32_t host, port;
udp_socket_t *handle;
if (argc < 5) {
return -1;
}
memset(buf, 0x00, sizeof(buf));
printf("sending %u packet(s)\n", c);
host = str_ip_to_int(argv[2].str, strlen(argv[2].str));
port = argv[3].u;
cnt = argv[4].u;
printf("host is %s\n", argv[2].str);
if (udp_open(host, port, port, &handle) != NO_ERROR) {
printf("udp_open to %u.%u.%u.%u:%u failed\n", IPV4_SPLIT(host), port);
return -1;
}
memset(&buf, 0x00, sizeof(buf));
printf("sending %u packet(s) to %u.%u.%u.%u:%u\n", cnt, IPV4_SPLIT(host), port);
uint32_t failures = 0;
while (c--) {
buf[255] = c;
if (send(&fd, buf, sizeof(buf), 0) != 0) {
while (cnt--) {
if (udp_send(buf, sizeof(buf), handle) != 0) {
failures++;
}
buf[128]++;
}
printf("%d pkts failed\n", failures);
}
break;
default:
goto minip_usage;
}
}

View File

@@ -116,6 +116,10 @@ uint16_t rfc1701_chksum(const uint8_t *buf, size_t len);
uint16_t rfc768_chksum(struct ipv4_hdr *ipv4, struct udp_hdr *udp);
uint16_t ones_sum16(uint32_t sum, const void *_buf, int len);
/* Helper methods for building headers */
void minip_build_mac_hdr(struct eth_hdr *pkt, uint8_t *dst, uint16_t type);
void minip_build_ipv4_hdr(struct ipv4_hdr *ipv4, uint32_t dst, uint8_t proto, uint16_t len);
int send_arp_request(uint32_t addr);
status_t minip_ipv4_send(pktbuf_t *p, uint32_t dest_addr, uint8_t proto);

View File

@@ -24,15 +24,17 @@
#include "minip-internal.h"
#include <err.h>
#include <stdio.h>
#include <debug.h>
#include <endian.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <malloc.h>
#include <list.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
struct udp_listener {
struct list_node list;
@@ -58,8 +60,6 @@ static uint8_t bcast_mac[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
static char minip_hostname[32] = "";
static mutex_t tx_mutex;
void minip_set_hostname(const char *name) {
size_t len = strlen(name);
if (len >= sizeof(minip_hostname)) {
@@ -129,7 +129,6 @@ void minip_init(tx_func_t tx_handler, void *tx_arg,
minip_gateway = gateway;
compute_broadcast_address();
mutex_init(&tx_mutex);
arp_cache_init();
net_timer_init();
}
@@ -139,14 +138,14 @@ uint16_t ipv4_payload_len(struct ipv4_hdr *pkt)
return (pkt->len - ((pkt->ver_ihl >> 4) * 5));
}
static void fill_in_mac_header(struct eth_hdr *pkt, uint8_t *dst, uint16_t type)
void minip_build_mac_hdr(struct eth_hdr *pkt, uint8_t *dst, uint16_t type)
{
memcpy(pkt->dst_mac, dst, sizeof(pkt->dst_mac));
memcpy(pkt->src_mac, minip_mac, sizeof(minip_mac));
pkt->type = htons(type);
}
static void fill_in_ipv4_header(struct ipv4_hdr *ipv4, uint32_t dst, uint8_t proto, uint16_t len)
void minip_build_ipv4_hdr(struct ipv4_hdr *ipv4, uint32_t dst, uint8_t proto, uint16_t len)
{
ipv4->ver_ihl = 0x45;
ipv4->dscp_ecn = 0;
@@ -175,7 +174,7 @@ int send_arp_request(uint32_t addr)
eth = pktbuf_prepend(p, sizeof(struct eth_hdr));
arp = pktbuf_append(p, sizeof(struct arp_pkt));
fill_in_mac_header(eth, bcast_mac, ETH_TYPE_ARP);
minip_build_mac_hdr(eth, bcast_mac, ETH_TYPE_ARP);
arp->htype = htons(0x0001);
arp->ptype = htons(0x0800);
@@ -191,6 +190,116 @@ int send_arp_request(uint32_t addr)
return 0;
}
/* net_timer callback: arg points at the caller's timeout flag; raise it */
static void handle_arp_timeout_cb(void *arg) {
    bool *timed_out = arg;

    *timed_out = true;
}
/* Resolve the destination MAC for host.
 * Broadcast short-circuits to bcast_mac. On an ARP cache miss, an ARP
 * request is sent and the cache is polled until the reply lands or the
 * 100ms net_timer expires. Returns NULL if resolution times out.
 * Note: the returned pointer aliases the ARP cache entry / bcast_mac;
 * the caller does not own it. */
static inline uint8_t *get_dest_mac(uint32_t host)
{
    uint8_t *dst_mac = NULL;
    bool arp_timeout = false;
    net_timer_t arp_timeout_timer;

    if (host == IPV4_BCAST) {
        return bcast_mac;
    }

    dst_mac = arp_cache_lookup(host);
    if (dst_mac == NULL) {
        send_arp_request(host);

        memset(&arp_timeout_timer, 0, sizeof(arp_timeout_timer));
        net_timer_set(&arp_timeout_timer, handle_arp_timeout_cb, &arp_timeout, 100);
        while (!arp_timeout) {
            dst_mac = arp_cache_lookup(host);
            if (dst_mac) {
                net_timer_cancel(&arp_timeout_timer);
                break;
            }
            /* yield instead of spinning flat out: the rx thread needs CPU
             * time to process the ARP reply we are waiting for, and each
             * lookup takes the arp cache mutex */
            thread_yield();
        }
    }

    return dst_mac;
}
/* Create a UDP socket handle bound to host:dport with source port sport.
 * On success *handle owns a heap allocation the caller must release with
 * udp_close(). Returns -EINVAL for a NULL handle pointer, -EHOSTUNREACH
 * when ARP resolution fails, -ENOMEM on allocation failure. */
status_t udp_open(uint32_t host, uint16_t sport, uint16_t dport, udp_socket_t **handle)
{
    TRACEF("host %u.%u.%u.%u sport %u dport %u handle %p\n",
        IPV4_SPLIT(host), sport, dport, handle);
    udp_socket_t *socket;
    uint8_t *dst_mac;

    if (handle == NULL) {
        return -EINVAL;
    }

    /* Resolve the destination MAC before allocating so a failed lookup
     * cannot leak the socket allocation */
    dst_mac = get_dest_mac(host);
    if (dst_mac == NULL) {
        return -EHOSTUNREACH;
    }

    socket = malloc(sizeof(udp_socket_t));
    if (!socket) {
        return -ENOMEM;
    }

    socket->host = host;
    socket->sport = sport;
    socket->dport = dport;
    socket->mac = dst_mac;  /* aliases the ARP cache entry; not owned */

    *handle = socket;

    return NO_ERROR;
}
/*
 * Tear down a UDP socket handle created by udp_open. The handle itself is
 * the only owned resource, so closing is just a free.
 * Returns NO_ERROR on success, -EINVAL for a NULL handle.
 */
status_t udp_close(udp_socket_t *handle)
{
    if (!handle) {
        return -EINVAL;
    }

    free(handle);
    return NO_ERROR;
}
/* Transmit len bytes from buf over the given UDP socket handle.
 * A pktbuf is allocated, headers are prepended (udp, ipv4, eth, in that
 * order so eth ends up first), and the payload is appended once via
 * pktbuf_append_data -- udp->data aliases the appended region, so the
 * old second memcpy into udp->data was redundant and has been dropped.
 * Ownership of the pktbuf passes to minip_tx_handler.
 * Returns -EINVAL for bad arguments, -ENOMEM if no pktbuf is available. */
status_t udp_send(void *buf, size_t len, udp_socket_t *handle)
{
    pktbuf_t *p;
    struct eth_hdr *eth;
    struct ipv4_hdr *ip;
    struct udp_hdr *udp;
    status_t ret = NO_ERROR;

    if (handle == NULL || buf == NULL || len == 0) {
        return -EINVAL;
    }

    if ((p = pktbuf_alloc()) == NULL) {
        return -ENOMEM;
    }

    udp = pktbuf_prepend(p, sizeof(struct udp_hdr));
    ip = pktbuf_prepend(p, sizeof(struct ipv4_hdr));
    eth = pktbuf_prepend(p, sizeof(struct eth_hdr));
    pktbuf_append_data(p, buf, len);

    udp->src_port = htons(handle->sport);
    udp->dst_port = htons(handle->dport);
    udp->len = htons(sizeof(struct udp_hdr) + len);
    udp->chksum = 0;

    minip_build_mac_hdr(eth, handle->mac, ETH_TYPE_IPV4);
    minip_build_ipv4_hdr(ip, handle->host, IP_PROTO_UDP, len + sizeof(struct udp_hdr));

#if (MINIP_USE_UDP_CHECKSUM != 0)
    udp->chksum = rfc768_chksum(ip, udp);
#endif

    minip_tx_handler(p);

    return ret;
}
status_t minip_ipv4_send(pktbuf_t *p, uint32_t dest_addr, uint8_t proto)
{
status_t ret = 0;
@@ -200,103 +309,26 @@ status_t minip_ipv4_send(pktbuf_t *p, uint32_t dest_addr, uint8_t proto)
struct ipv4_hdr *ip = pktbuf_prepend(p, sizeof(struct ipv4_hdr));
struct eth_hdr *eth = pktbuf_prepend(p, sizeof(struct eth_hdr));
mutex_acquire(&tx_mutex);
if (dest_addr == IPV4_BCAST || dest_addr == minip_broadcast) {
dst_mac = bcast_mac;
goto ready;
}
/* If we're missing an address in the cache send out a request periodically for a bit */
dst_mac = arp_cache_lookup(dest_addr);
dst_mac = get_dest_mac(dest_addr);
if (!dst_mac) {
send_arp_request(dest_addr);
// TODO: Add a timeout here rather than an arbitrary iteration limit
for (int i = 50000; i > 0; i--) {
if ((dst_mac = arp_cache_lookup(dest_addr)) != NULL) {
break;
}
}
if (dst_mac == NULL) {
ret = -1;
goto err;
}
pktbuf_free(p);
ret = -EHOSTUNREACH;
goto err;
}
ready:
fill_in_mac_header(eth, dst_mac, ETH_TYPE_IPV4);
fill_in_ipv4_header(ip, dest_addr, proto, data_len);
minip_build_mac_hdr(eth, dst_mac, ETH_TYPE_IPV4);
minip_build_ipv4_hdr(ip, dest_addr, proto, data_len);
minip_tx_handler(p);
err:
mutex_release(&tx_mutex);
return ret;
}
int minip_udp_send(const void *buf, size_t len, uint32_t addr,
uint16_t dstport, uint16_t srcport)
{
pktbuf_t *p;
struct eth_hdr *eth;
struct ipv4_hdr *ip;
struct udp_hdr *udp;
uint8_t *dst_mac;
int ret = 0;
if ((p = pktbuf_alloc()) == NULL) {
return -1;
}
udp = pktbuf_prepend(p, sizeof(struct udp_hdr));
ip = pktbuf_prepend(p, sizeof(struct ipv4_hdr));
eth = pktbuf_prepend(p, sizeof(struct eth_hdr));
memset(p->data, 0, p->dlen);
pktbuf_append_data(p, buf, len);
mutex_acquire(&tx_mutex);
if (addr == IPV4_BCAST) {
dst_mac = bcast_mac;
goto ready;
}
/* If we're missing an address in the cache send out a request periodically for a bit */
dst_mac = arp_cache_lookup(addr);
if (!dst_mac) {
send_arp_request(addr);
// TODO: Add a timeout here rather than an arbitrary iteration limit
for (int i = 50000; i > 0; i--) {
if ((dst_mac = arp_cache_lookup(addr)) != NULL) {
break;
}
}
if (dst_mac == NULL) {
ret = -1;
goto err;
}
}
ready:
udp->src_port = htons(srcport);
udp->dst_port = htons(dstport);
udp->len = htons(sizeof(struct udp_hdr) + len);
udp->chksum = 0;
memcpy(udp->data, buf, len);
fill_in_mac_header(eth, dst_mac, ETH_TYPE_IPV4);
fill_in_ipv4_header(ip, addr, IP_PROTO_UDP, len + sizeof(struct udp_hdr));
#if (MINIP_USE_UDP_CHECKSUM != 0)
udp->chksum = rfc768_chksum(ip, udp);
#endif
minip_tx_handler(p);
err:
mutex_release(&tx_mutex);
return ret;
}
@@ -323,8 +355,8 @@ void send_ping_reply(uint32_t ipaddr, struct icmp_pkt *req, size_t reqdatalen)
len = sizeof(struct icmp_pkt) + reqdatalen;
fill_in_mac_header(eth, arp_cache_lookup(ipaddr), ETH_TYPE_IPV4);
fill_in_ipv4_header(ip, ipaddr, IP_PROTO_ICMP, len);
minip_build_mac_hdr(eth, arp_cache_lookup(ipaddr), ETH_TYPE_IPV4);
minip_build_ipv4_hdr(ip, ipaddr, IP_PROTO_ICMP, len);
icmp->type = ICMP_ECHO_REPLY;
icmp->code = 0;
@@ -368,27 +400,27 @@ __NO_INLINE static void handle_ipv4_packet(pktbuf_t *p, const uint8_t *src_mac)
/* reject bad packets */
if (((ip->ver_ihl >> 4) & 0xf) != 4) {
/* not version 4 */
//LTRACEF("REJECT: not version 4\n");
LTRACEF("REJECT: not version 4\n");
return;
}
/* do we have enough buffer to hold the full header + options? */
size_t header_len = (ip->ver_ihl & 0xf) * 4;
if (p->dlen < header_len) {
//LTRACEF("REJECT: not enough buffer to hold header\n");
LTRACEF("REJECT: not enough buffer to hold header\n");
return;
}
/* compute checksum */
if (rfc1701_chksum((void *)ip, header_len) != 0) {
/* bad checksum */
//LTRACEF("REJECT: bad checksum\n");
LTRACEF("REJECT: bad checksum\n");
return;
}
/* is the pkt_buf large enough to hold the length the header says the packet is? */
if (htons(ip->len) > p->dlen) {
//LTRACEF("REJECT: packet exceeds size of buffer (header %d, dlen %d)\n", htons(ip->len), p->dlen);
LTRACEF("REJECT: packet exceeds size of buffer (header %d, dlen %d)\n", htons(ip->len), p->dlen);
return;
}
@@ -408,7 +440,7 @@ __NO_INLINE static void handle_ipv4_packet(pktbuf_t *p, const uint8_t *src_mac)
/* see if it's for us */
if (ip->dst_addr != IPV4_BCAST) {
if (minip_ip != IPV4_NONE && ip->dst_addr != minip_ip && ip->dst_addr != minip_broadcast) {
//LTRACEF("REJECT: for another host\n");
LTRACEF("REJECT: for another host\n");
return;
}
}
@@ -477,7 +509,7 @@ __NO_INLINE static int handle_arp_pkt(pktbuf_t *p)
rarp = pktbuf_append(rp, sizeof(struct arp_pkt));
// Eth header
fill_in_mac_header(reth, eth->src_mac, ETH_TYPE_ARP);
minip_build_mac_hdr(reth, eth->src_mac, ETH_TYPE_ARP);
// ARP packet
rarp->oper = htons(ARP_OPER_REPLY);
@@ -516,37 +548,15 @@ void minip_rx_driver_callback(pktbuf_t *p)
switch(htons(eth->type)) {
case ETH_TYPE_IPV4:
LTRACEF("ipv4 pkt\n");
handle_ipv4_packet(p, eth->src_mac);
break;
case ETH_TYPE_ARP:
LTRACEF("arp pkt\n");
handle_arp_pkt(p);
break;
}
}
minip_fd_t *minip_open(uint32_t addr, uint16_t port)
{
minip_fd_t *fd = malloc(sizeof(minip_fd_t));
if (!fd) {
return fd;
}
send_arp_request(addr);
fd->addr = addr;
fd->port = port;
return fd;
}
void minip_close(minip_fd_t *fd)
{
free(fd);
}
int send(minip_fd_t *fd, void *buf, size_t len, int flags)
{
return minip_udp_send(buf, len, fd->addr, fd->port, fd->port);
}
// vim: set ts=4 sw=4 expandtab:

View File

@@ -30,24 +30,96 @@
#include <kernel/semaphore.h>
#include <lib/pktbuf.h>
#if WITH_KERNEL_VM
#include <kernel/vm.h>
#endif
#define LOCAL_TRACE 0
static struct list_node pb_freelist = LIST_INITIAL_VALUE(pb_freelist);
static struct list_node pb_buflist = LIST_INITIAL_VALUE(pb_buflist);
static semaphore_t pb_sem = SEMAPHORE_INITIAL_VALUE(pb_sem, 0);
void pktbuf_create(void *ptr, u32 phys, size_t size) {
pktbuf_t *p = ptr;
if (size != PKTBUF_SIZE) {
panic("pktbuf_create: invalid size %d\n", size);
}
p->phys_base = phys + __offsetof(pktbuf_t, buffer);
p->rsv0 = 0;
p->rsv1 = 0;
p->rsv2 = 0;
static unsigned int cur_id = 0;
/* Register the pktbuf header at ptr with the free pool and post the
 * semaphore that pktbuf_alloc waits on.
 * NOTE(review): size is accepted but unused here -- presumably callers
 * guarantee the chunk holds a pktbuf_t; confirm against callers. */
void pktbuf_create(void *ptr, size_t size) {
pktbuf_t *p = ptr;

p->magic = PKTBUF_HDR_MAGIC;
/* no backing buffer yet; pktbuf_alloc attaches one and sets phys_base */
p->phys_base = 0;
/* diagnostic id (used by pktbuf_dump / panic messages); cur_id++ is not
 * synchronized -- fine only if creation happens single-threaded at init */
p->id = cur_id++;
list_add_tail(&pb_freelist, &(p->list));
sem_post(&pb_sem, false);
}
/* Carve buffers for pktbufs of size PKTBUF_BUF_SIZE from the memory pointed at by ptr */
void pktbuf_create_bufs(void *ptr, size_t size) {
uintptr_t phys_addr;
#if WITH_KERNEL_VM
if (arch_mmu_query((uintptr_t) ptr, &phys_addr, NULL) < 0) {
printf("Failed to get physical address for pktbuf slab, using virtual\n");
}
#else
phys_addr = ptr;
#endif
while (size > sizeof(pktbuf_buf_t)) {
pktbuf_buf_t *pkt = ptr;
pkt->magic = PKTBUF_BUF_MAGIC;
pkt->phys_addr = phys_addr;
list_add_tail(&pb_buflist, &pkt->list);
ptr += sizeof(pktbuf_buf_t);
phys_addr += sizeof(pktbuf_buf_t);
size -= sizeof(pktbuf_buf_t);
}
}
/* Pop one backing slab from the buffer pool, or NULL if the pool is empty.
 * Caller is expected to hold the critical section (see pktbuf_alloc). */
static inline pktbuf_buf_t *pktbuf_get_buf(void) {
return list_remove_head_type(&pb_buflist, pktbuf_buf_t, list);
}
pktbuf_t *pktbuf_alloc(void) {
pktbuf_t *p = NULL;
pktbuf_buf_t *b = NULL;
/* Check for buffers first to reduce the complexity of cases where we have a pktbuf
* pointer but no buffer and would otherwise have to do sem / list bookkeeping on
* cleanup */
sem_wait(&pb_sem);
enter_critical_section();
b = pktbuf_get_buf();
if (b) {
p = list_remove_head_type(&pb_freelist, pktbuf_t, list);
}
exit_critical_section();
if (b->magic != PKTBUF_BUF_MAGIC) {
panic("pktbuf id %u has corrupted buffer magic value\n"
"buf_addr %p magic: 0x%08X (expected 0x%08X), phys_addr: %p\n",
p->id, b, b->magic, PKTBUF_BUF_MAGIC, (void *) b->phys_addr);
}
if (!p) {
return NULL;
}
p->buffer = (uint8_t *) b;
p->data = p->buffer + PKTBUF_MAX_HDR;
p->dlen = 0;
p->managed = true;
/* TODO: This will be moved to the stack soon */
p->eof = true;
p->phys_base = b->phys_addr;
return p;
}
pktbuf_t *pktbuf_alloc_empty(void *buf, size_t dlen) {
pktbuf_t *p;
sem_wait(&pb_sem);
@@ -59,14 +131,24 @@ pktbuf_t *pktbuf_alloc(void) {
return NULL;
}
p->data = p->buffer + PKTBUF_MAX_HDR;
p->dlen = 0;
p->buffer = buf;
p->data = p->buffer;
p->dlen = dlen;
p->managed = false;
return p;
}
void pktbuf_free(pktbuf_t *p) {
enter_critical_section();
list_add_tail(&pb_freelist, &(p->list));
if (p->managed && p->buffer) {
pktbuf_buf_t *pkt = (pktbuf_buf_t *)p->buffer;
list_add_tail(&pb_buflist, &pkt->list);
}
p->buffer = NULL;
p->data = NULL;
p->eof = false;
p->managed = false;
exit_critical_section();
sem_post(&pb_sem, true);
@@ -125,4 +207,9 @@ void pktbuf_consume_tail(pktbuf_t *p, size_t sz) {
p->dlen -= sz;
}
/* Print a pktbuf's bookkeeping fields (id, pointers, length, data offset
 * into the slab, physical base, managed flag) for debugging.
 * NOTE(review): %lu assumes uintptr_t == unsigned long on this target --
 * fine for 32-bit ARM builds; confirm before reusing elsewhere. */
void pktbuf_dump(pktbuf_t *p) {
printf("pktbuf id %u, data %p, buffer %p, dlen %u, data offset %lu, phys_base %p, managed %u\n",
p->id, p->data, p->buffer, p->dlen, (uintptr_t) p->data - (uintptr_t) p->buffer,
(void *)p->phys_base, p->managed);
}
// vim: set noexpandtab:

View File

@@ -25,6 +25,7 @@
#include <debug.h>
#include <list.h>
#include <err.h>
#include <errno.h>
#include <reg.h>
#include <endian.h>
#include <stdio.h>
@@ -52,12 +53,8 @@
#define GEM_RX_BUF_SIZE 1536
#define GEM_TX_BUF_SIZE 1536
struct list_node active_tx_list;
struct list_node pending_tx_list;
struct list_node pktbuf_to_free_list;
struct list_node *active_tx_ptr = &active_tx_list;
struct list_node *pending_tx_ptr = &pending_tx_list;
struct list_node tx_queue;
struct list_node queued_pbufs;
gem_cb_t rx_callback = NULL;
struct gem_desc {
@@ -65,12 +62,23 @@ struct gem_desc {
uint32_t ctrl;
};
/* Quick overview:
* RX:
* rx_tbl contains rx descriptors. A pktbuf is allocated for each of these and a descriptor
* entry in the table points to a buffer in the pktbuf. rx_tbl[X]'s pktbuf is stored in rx_pbufs[X]
*
* TX:
* The current position to write new tx descriptors to is maintained by tx_pos. As frames are
* queued in tx_tbl their pktbufs are stored in the list queued_pbufs. As frame transmission is
* completed these pktbufs are released back to the pool by the interrupt handler for TX_COMPLETE
*/
struct gem_state {
struct gem_desc rx[GEM_RX_BUF_CNT];
struct gem_desc tx[GEM_TX_BUF_CNT];
struct gem_desc rx_tbl[GEM_RX_BUF_CNT];
struct gem_desc tx_tbl[GEM_TX_BUF_CNT];
pktbuf_t *rx_pbufs[GEM_RX_BUF_CNT];
unsigned int tx_pos;
};
static pktbuf_t *gem_rx_buffers[GEM_RX_BUF_CNT];
static event_t rx_pending;
static event_t tx_complete;
@@ -78,23 +86,6 @@ static bool debug_rx = false;
static struct gem_state *state;
static paddr_t state_phys;
static volatile struct gem_regs *regs = NULL;
static mutex_t tx_mutex;
static void dump_gem_descriptors(bool rx, bool tx) {
if (rx) {
for (int i = 0; i < GEM_RX_BUF_CNT; i++) {
printf("gem rx desc %02d [%p]: [addr] = %#08x, [ctrl] = %#08x\n", i, &state->rx[i],
state->rx[i].addr, state->rx[i].ctrl);
}
}
if (tx) {
for (int i = 0; i < GEM_TX_BUF_CNT; i++) {
printf("gem tx desc %02d [%p]: [addr] = %#08x, [ctrl] = %#08x\n", i, &state->tx[i],
state->tx[i].addr, state->tx[i].ctrl);
}
}
}
static void debug_rx_handler(pktbuf_t *p)
{
@@ -109,47 +100,48 @@ static void debug_rx_handler(pktbuf_t *p)
}
}
bool gem_tx_active(void) {
return (regs->tx_status & TX_STATUS_GO);
/* Release the pktbufs of the oldest completed TX frame back to the pool.
 * Buffers sit on queued_pbufs in transmit order with p->eof marking the
 * final buffer of each frame, so pop until (and including) the first eof.
 * Unlike the previous do/while, an empty list mid-walk now terminates the
 * loop instead of dereferencing a NULL head (e.g. on a spurious or
 * duplicated TX-complete interrupt). */
static void free_completed_pbuf_frame(void) {
    pktbuf_t *p;

    while ((p = list_remove_head_type(&queued_pbufs, pktbuf_t, list)) != NULL) {
        /* read eof before freeing: pktbuf_free clears the field */
        bool eof = p->eof;

        pktbuf_free(p);
        if (eof) {
            break;
        }
    }
}
void gem_queue_for_tx(void) {
struct list_node *tmp;
struct pktbuf *p;
void queue_pkts_in_tx_tbl(void) {
pktbuf_t *p;
unsigned int cur_pos;
/* Packets being sent will be constantly appended to the pending queue. By swapping the pending
* ptr to the active pointer here atomically we don't need a lock that would risk being acquired
* in both thread and interrupt context */
enter_critical_section();
if (list_is_empty(&pending_tx_list) || gem_tx_active())
if (list_is_empty(&tx_queue)) {
goto exit;
LTRACEF("pending %d, free %d, active %d\n", list_length(&pending_tx_list),
list_length(&pktbuf_to_free_list), list_length(&active_tx_list));
tmp = pending_tx_ptr;
pending_tx_ptr = active_tx_ptr;
active_tx_ptr = tmp;
int pos = 0;
while ((p = list_remove_head_type(pending_tx_ptr, pktbuf_t, list)) != NULL) {
arch_clean_cache_range((addr_t)p->buffer, sizeof(p->buffer));
state->tx[pos].addr = (uintptr_t) pktbuf_data_phys(p);
state->tx[pos].ctrl = TX_BUF_LEN(p->dlen);
if (pos == GEM_TX_BUF_CNT) {
state->tx[pos - 1].ctrl |= TX_DESC_WRAP;
}
list_add_tail(&pktbuf_to_free_list, &p->list);
pos++;
}
state->tx[pos - 1].ctrl |= TX_LAST_BUF;
state->tx[pos].addr = 0;
state->tx[pos].ctrl = TX_DESC_USED;
/* Queue packets in the descriptor table until we're either out of space in the table
* or out of packets in our tx queue. Any packets left will remain in the list and be
* processed the next time available */
while ((state->tx_tbl[state->tx_pos].ctrl & TX_DESC_USED) &&
((p = list_remove_head_type(&tx_queue, pktbuf_t, list)) != NULL)) {
cur_pos = state->tx_pos;
state->tx_tbl[cur_pos].addr = pktbuf_data_phys(p);
state->tx_tbl[cur_pos].ctrl &= TX_DESC_WRAP; /* Protect the wrap bit the table's end */
state->tx_tbl[cur_pos].ctrl |= TX_BUF_LEN(p->dlen);
/* Mark end of frame for if we've reached the end of a pktbuf chain */
if (p->eof) {
state->tx_tbl[cur_pos].ctrl |= TX_LAST_BUF;
}
state->tx_pos = (state->tx_pos + 1) % GEM_TX_BUF_CNT;
list_add_tail(&queued_pbufs, &p->list);
}
regs->tx_qbar = ((uintptr_t)&state->tx[0] - (uintptr_t)state) + state_phys;
regs->net_ctrl |= NET_CTRL_START_TX;
exit:
@@ -159,23 +151,18 @@ exit:
int gem_send_raw_pkt(struct pktbuf *p)
{
status_t ret = NO_ERROR;
struct list_node pbuf_head = LIST_INITIAL_VALUE(pbuf_head);
if (!p || !p->dlen) {
ret = -1;
goto err;
}
LTRACEF("buf %p, len %zu, pkt %p\n", p->data, p->dlen, p);
enter_critical_section();
list_add_tail(pending_tx_ptr, &p->list);
list_add_tail(&tx_queue, &p->list);
queue_pkts_in_tx_tbl();
exit_critical_section();
if (!gem_tx_active()) {
gem_queue_for_tx();
}
err:
return ret;
}
@@ -199,7 +186,7 @@ enum handler_return gem_int_handler(void *arg) {
if (intr_status & INTR_RX_USED_READ) {
for (int i = 0; i < GEM_RX_BUF_CNT; i++) {
state->rx[i].addr &= ~RX_DESC_USED;
state->rx_tbl[i].addr &= ~RX_DESC_USED;
}
regs->rx_status &= ~RX_STATUS_BUFFER_NOT_AVAIL;
@@ -212,17 +199,16 @@ enum handler_return gem_int_handler(void *arg) {
printf("tx ahb error!\n");
}
if (intr_status & INTR_TX_COMPLETE || intr_status & INTR_TX_USED_READ) {
LTRACEF("pending %d, free %d, active %d\n", list_length(&pending_tx_list),
list_length(&pktbuf_to_free_list), list_length(&active_tx_list));
pktbuf_t *p;
while ((p = list_remove_head_type(&pktbuf_to_free_list, pktbuf_t, list)) != NULL) {
pktbuf_free(p);
}
/* A frame has been completed so we can clean up ownership of its buffers */
if (intr_status & INTR_TX_COMPLETE) {
free_completed_pbuf_frame();
regs->tx_status |= TX_STATUS_COMPLETE;
}
regs->tx_status |= (TX_STATUS_COMPLETE | TX_STATUS_USED_READ);
gem_queue_for_tx();
resched = true;
/* The controller has processed packets until it hit a buffer owned by the driver */
if (intr_status & INTR_TX_USED_READ) {
queue_pkts_in_tx_tbl();
regs->tx_status |= TX_STATUS_USED_READ;
}
regs->intr_status = intr_status;
@@ -247,30 +233,37 @@ static bool gem_phy_init(void) {
return wait_for_phy_idle();
}
static void gem_cfg_buffer_descs(void)
static status_t gem_cfg_buffer_descs(void)
{
memset(state, 0, sizeof(struct gem_state));
/* RX setup */
for (int i = 0; i < GEM_RX_BUF_CNT; i++) {
pktbuf_t *p = gem_rx_buffers[i];
DEBUG_ASSERT(p);
/* Take pktbufs from the allocated target pool and assign them to the gem RX
* descriptor table */
for (unsigned int n = 0; n < GEM_RX_BUF_CNT; n++) {
pktbuf_t *p = pktbuf_alloc();
if (!p) {
return -1;
}
/* make sure the buffers start off with no stale data in them */
arch_invalidate_cache_range((addr_t)p->buffer, sizeof(p->buffer));
state->rx[i].addr = (uintptr_t) pktbuf_data_phys(p);
state->rx[i].ctrl = 0;
state->rx_pbufs[n] = p;
state->rx_tbl[n].addr = (uintptr_t) p->phys_base;
state->rx_tbl[n].ctrl = 0;
}
memset(state->tx, 0, sizeof(state->tx));
/* Claim ownership of TX descriptors for the driver */
for (unsigned i = 0; i < GEM_TX_BUF_CNT; i++) {
state->tx_tbl[i].ctrl |= TX_DESC_USED;
}
/* Both set of descriptors need wrap bits set */
state->rx[GEM_RX_BUF_CNT-1].addr |= RX_DESC_WRAP;
state->rx[GEM_TX_BUF_CNT-1].ctrl |= TX_DESC_WRAP;
/* Both set of descriptors need wrap bits set at the end of their tables*/
state->rx_tbl[GEM_RX_BUF_CNT-1].addr |= RX_DESC_WRAP;
state->tx_tbl[GEM_TX_BUF_CNT-1].ctrl |= TX_DESC_WRAP;
/* load the physical address of the rx descriptor */
regs->rx_qbar = ((uintptr_t)&state->rx[0] - (uintptr_t)state) + state_phys;
/* Point the controller at the offset into state's physical location for RX descs */
regs->rx_qbar = ((uintptr_t)&state->rx_tbl[0] - (uintptr_t)state) + state_phys;
regs->tx_qbar = ((uintptr_t)&state->tx_tbl[0] - (uintptr_t)state) + state_phys;
return NO_ERROR;
}
static void gem_cfg_ints(void)
@@ -302,9 +295,9 @@ int gem_rx_thread(void *arg)
event_wait(&rx_pending);
for (;;) {
if (state->rx[bp].addr & RX_DESC_USED) {
p = gem_rx_buffers[bp];
p->dlen = RX_BUF_LEN(state->rx[bp].ctrl);
if (state->rx_tbl[bp].addr & RX_DESC_USED) {
p = state->rx_pbufs[bp];
p->dlen = RX_BUF_LEN(state->rx_tbl[bp].ctrl);
p->data = p->buffer + 2;
if (debug_rx) {
debug_rx_handler(p);
@@ -312,11 +305,8 @@ int gem_rx_thread(void *arg)
rx_callback(p);
}
/* invalidate the buffer before putting it back */
arch_invalidate_cache_range((addr_t)p->buffer, sizeof(p->buffer));
state->rx[bp].addr &= ~RX_DESC_USED;
state->rx[bp].ctrl = 0;
state->rx_tbl[bp].addr &= ~RX_DESC_USED;
state->rx_tbl[bp].ctrl = 0;
bp = (bp + 1) % GEM_RX_BUF_CNT;
} else {
break;
@@ -327,80 +317,8 @@ int gem_rx_thread(void *arg)
return 0;
}
status_t gem_init(uintptr_t base, uint32_t dmasize)
void gem_deinit(uintptr_t base)
{
int n;
status_t ret;
thread_t *rx_thread;
DEBUG_ASSERT(base == GEM0_BASE || base == GEM1_BASE);
regs = (struct gem_regs *) base;
list_initialize(&pending_tx_list);
list_initialize(&active_tx_list);
list_initialize(&pktbuf_to_free_list);
/* make sure we can allocate at least enough memory for a gem_state
* + some buffers
*/
if (dmasize <= PAGE_ALIGN(sizeof(struct gem_state)))
return ERR_INVALID_ARGS;
/* allocate a block of contiguous memory for the descriptors */
vaddr_t dmabase;
ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_desc",
sizeof(*state), (void **)&dmabase, 0, 0, ARCH_MMU_FLAG_UNCACHED);
if (ret < 0)
return ret;
/* get the physical address */
paddr_t dmabase_phys;
ret = arch_mmu_query(dmabase, &dmabase_phys, NULL);
if (ret < 0)
return ret;
TRACEF("dmabase 0x%lx, dmabase_phys 0x%lx, size %zu\n", dmabase, dmabase_phys, sizeof(*state));
/* tx/rx descriptor tables */
state = (void *)dmabase;
state_phys = dmabase_phys;
/* allocate packet buffers */
ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_desc",
dmasize, (void **)&dmabase, 0, 0, ARCH_MMU_FLAG_CACHED);
if (ret < 0)
return ret;
ret = arch_mmu_query(dmabase, &dmabase_phys, NULL);
if (ret < 0)
return ret;
TRACEF("packetbuf 0x%lx, packetbuf_phys 0x%lx, size %zu\n", dmabase, dmabase_phys, dmasize);
/* allocate packet buffers */
while (dmasize >= PKTBUF_SIZE) {
pktbuf_create((void *)dmabase, dmabase_phys, PKTBUF_SIZE);
dmasize -= PKTBUF_SIZE;
dmabase += PKTBUF_SIZE;
dmabase_phys += PKTBUF_SIZE;
}
for (n = 0; n < GEM_RX_BUF_CNT; n++) {
if ((gem_rx_buffers[n] = pktbuf_alloc()) == NULL) {
printf("cannot allocate %d rx buffers\n", GEM_RX_BUF_CNT);
return -1;
}
gem_rx_buffers[n]->data = gem_rx_buffers[n]->buffer;
}
/* Lock / scheduling init */
mutex_init(&tx_mutex);
event_init(&tx_complete, false, EVENT_FLAG_AUTOUNSIGNAL);
event_init(&rx_pending, false, EVENT_FLAG_AUTOUNSIGNAL);
/* rx background thread */
rx_thread = thread_create("gem_rx", gem_rx_thread, NULL, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
thread_resume(rx_thread);
/* reset the gem peripheral */
uint32_t rst_mask;
if (base == GEM0_BASE) {
@@ -412,32 +330,78 @@ status_t gem_init(uintptr_t base, uint32_t dmasize)
spin(1);
SLCR->GEM_RST_CTRL &= ~rst_mask;
/* Clear Network control / status registers */
regs->net_ctrl |= NET_CTRL_STATCLR;
regs->rx_status = 0x0F;
regs->tx_status = 0xFF;
/* Disable interrupts */
regs->intr_dis = 0x7FFFEFF;
/* Empty out the buffer queues */
regs->rx_qbar = 0;
regs->tx_qbar = 0;
}
/* Configure for:
* Ghz enabled, 100mhz default
* broadcast / multicast enabled, hw checksums,
* clock divider 48, assuming 80MHz < cpu_1xclk < 120MHz
* skip first two bytes of rx buffer (ensure ip header alignment)
*/
regs->net_cfg = NET_CFG_FULL_DUPLEX | NET_CFG_GIGE_EN | NET_CFG_SPEED_100 |
NET_CFG_RX_CHKSUM_OFFLD_EN | NET_CFG_FCS_REMOVE | NET_CFG_MDC_CLK_DIV(0x7) |
NET_CFG_RX_BUF_OFFSET(2);
/* TODO: Fix signature */
status_t gem_init(uintptr_t gem_base)
{
status_t ret;
uint32_t reg_val;
thread_t *rx_thread;
vaddr_t state_vaddr;
paddr_t state_paddr;
DEBUG_ASSERT(gem_base == GEM0_BASE || gem_base == GEM1_BASE);
/* Data structure init */
event_init(&tx_complete, false, EVENT_FLAG_AUTOUNSIGNAL);
event_init(&rx_pending, false, EVENT_FLAG_AUTOUNSIGNAL);
list_initialize(&queued_pbufs);
list_initialize(&tx_queue);
/* allocate a block of contiguous memory for the peripheral state */
if ((ret = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "gem_desc",
sizeof(*state), (void **)&state_vaddr, 0, 0, ARCH_MMU_FLAG_UNCACHED)) < 0) {
return ret;
}
if ((ret = arch_mmu_query(state_vaddr, &state_paddr, NULL)) < 0) {
return ret;
}
/* tx/rx descriptor tables and memory mapped registers */
state = (void *)state_vaddr;
state_phys = state_paddr;
regs = (struct gem_regs *) gem_base;
/* rx background thread */
rx_thread = thread_create("gem_rx", gem_rx_thread, NULL, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
thread_resume(rx_thread);
/* Bring whatever existing configuration is up down so we can do it cleanly */
gem_deinit(gem_base);
gem_cfg_buffer_descs();
/* Self explanatory configuration for the gige */
reg_val = NET_CFG_FULL_DUPLEX;
reg_val |= NET_CFG_GIGE_EN;
reg_val |= NET_CFG_SPEED_100;
reg_val |= NET_CFG_RX_CHKSUM_OFFLD_EN;
reg_val |= NET_CFG_FCS_REMOVE;
reg_val |= NET_CFG_MDC_CLK_DIV(0x7);
reg_val |= NET_CFG_RX_BUF_OFFSET(2);
regs->net_cfg = reg_val;
/* Set DMA to 1600 byte rx buffer, 8KB addr space for rx, 4KB addr space for tx,
* hw checksumming, little endian, and use INCR16 ahb bursts
*/
regs->dma_cfg = DMA_CFG_AHB_MEM_RX_BUF_SIZE(0x19) | DMA_CFG_RX_PKTBUF_MEMSZ_SEL(0x3) |
DMA_CFG_TX_PKTBUF_MEMSZ_SEL | DMA_CFG_CSUM_GEN_OFFLOAD_EN |
DMA_CFG_AHB_FIXED_BURST_LEN(0x16);
reg_val = DMA_CFG_AHB_MEM_RX_BUF_SIZE(0x19);
reg_val |= DMA_CFG_RX_PKTBUF_MEMSZ_SEL(0x3);
reg_val |= DMA_CFG_TX_PKTBUF_MEMSZ_SEL;
reg_val |= DMA_CFG_CSUM_GEN_OFFLOAD_EN;
reg_val |= DMA_CFG_AHB_FIXED_BURST_LEN(0x16);
regs->dma_cfg = reg_val;
/* Enable VREF from GPIOB */
SLCR_REG(GPIOB_CTRL) = 0x1;
@@ -448,10 +412,12 @@ status_t gem_init(uintptr_t base, uint32_t dmasize)
return ret;
}
gem_cfg_buffer_descs();
gem_cfg_ints();
regs->net_ctrl = NET_CTRL_MD_EN | NET_CTRL_RX_EN | NET_CTRL_TX_EN;
reg_val = NET_CTRL_MD_EN;
reg_val |= NET_CTRL_RX_EN;
reg_val |= NET_CTRL_TX_EN;
regs->net_ctrl = reg_val;
return NO_ERROR;
}
@@ -522,11 +488,11 @@ static int cmd_gem(int argc, const cmd_args *argv)
(mac_bot >> 8) & 0xFF, mac_bot & 0xFF);
uint32_t rx_used = 0, tx_used = 0;
for (int i = 0; i < GEM_RX_BUF_CNT; i++) {
rx_used += !!(state->rx[i].addr & RX_DESC_USED);
rx_used += !!(state->rx_tbl[i].addr & RX_DESC_USED);
}
for (int i = 0; i < GEM_TX_BUF_CNT; i++) {
tx_used += !!(state->tx[i].ctrl & TX_DESC_USED);
tx_used += !!(state->tx_tbl[i].ctrl & TX_DESC_USED);
}
frames_tx += regs->frames_tx;
@@ -537,10 +503,6 @@ static int cmd_gem(int argc, const cmd_args *argv)
frames_rx, frames_tx);
} else if (argv[1].str[0] == 'd') {
debug_rx = !debug_rx;
} else if (argv[1].str[0] == 'p') {
dump_gem_descriptors(true, true);
} else if (argv[1].str[0] == 'r') {
gem_cfg_buffer_descs();
}
return 0;

View File

@@ -4,7 +4,7 @@
struct pktbuf;
typedef void (*gem_cb_t)(struct pktbuf *p);
status_t gem_init(uintptr_t regsbase, uint32_t dmasize);
status_t gem_init(uintptr_t regsbase);
void gem_set_callback(gem_cb_t rx);
void gem_set_macaddr(uint8_t mac[6]);
int gem_send_raw_pkt(struct pktbuf *p);

View File

@@ -180,7 +180,7 @@ void target_early_init(void)
void target_init(void)
{
gem_init(GEM0_BASE, 256*1024);
gem_init(GEM0_BASE);
}
void target_set_debug_led(unsigned int led, bool on)

View File

@@ -22,9 +22,14 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <kernel/vm.h>
#include <lib/pktbuf.h>
#include <platform/zynq.h>
#include <platform/gem.h>
#define ZYNQ_PKTBUF_CNT 128
zynq_pll_cfg_tree_t zynq_pll_cfg = {
.arm = {
.lock_cnt = 375,
@@ -179,5 +184,26 @@ void target_early_init(void)
void target_init(void)
{
gem_init(GEM0_BASE, 256*1024);
paddr_t buf_vaddr;
void *hdr_addr;
if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "pktbuf_headers",
ZYNQ_PKTBUF_CNT * sizeof(pktbuf_buf_t), (void **)&hdr_addr, 0, 0, ARCH_MMU_FLAG_CACHED) < 0) {
printf("Failed to initialize pktbuf hdr slab\n");
return;
}
for (size_t i = 0; i < ZYNQ_PKTBUF_CNT; i++) {
pktbuf_create((void *)hdr_addr, sizeof(pktbuf_t));
hdr_addr += sizeof(pktbuf_t);
}
if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "pktbuf_buffers",
ZYNQ_PKTBUF_CNT * sizeof(pktbuf_buf_t), (void **)&buf_vaddr, 0, 0, ARCH_MMU_FLAG_UNCACHED) < 0) {
printf("Failed to initialize pktbuf vm slab\n");
return;
}
pktbuf_create_bufs((void *)buf_vaddr, ZYNQ_PKTBUF_CNT * sizeof(pktbuf_buf_t));
gem_init(GEM0_BASE);
}