网络服务支持

This commit is contained in:
zhangzheng
2025-02-16 23:11:18 +08:00
parent 72cbd3d0cd
commit 7c5ff01e74
46 changed files with 463 additions and 235 deletions

4
.vscode/launch.json vendored
View File

@@ -71,8 +71,8 @@
// "miDebuggerPath": "/home/zhangzheng/gcc-arm-10.3-2021.07-aarch64-aarch64-none-elf/bin/aarch64-none-elf-gdb",
// "miDebuggerPath": "/home/mkrtos-smart/toolchains/gcc-arm-10.3-2021.07-x86_64-aarch64-none-elf/bin/aarch64-none-elf-gdb",
// "miDebuggerPath": "/home/toolchains/gcc-arm-10.3-2021.07-x86_64-aarch64-none-elf/bin/aarch64-none-elf-gdb",
// "miDebuggerPath": "/Users/zhangzheng/gcc-arm-none-eabi-10.3-2021.10/bin/arm-none-eabi-gdb",
"miDebuggerPath": "/home/zhangzheng/toolchain/gcc-arm-10.3-2021.07-x86_64-arm-none-eabi/bin/arm-none-eabi-gdb",
"miDebuggerPath": "/Users/zhangzheng/gcc-arm-none-eabi-10.3-2021.10/bin/arm-none-eabi-gdb",
// "miDebuggerPath": "/home/zhangzheng/toolchain/gcc-arm-10.3-2021.07-x86_64-arm-none-eabi/bin/arm-none-eabi-gdb",
"miDebuggerServerAddress": "127.0.0.1:33333",
"MIMode": "gdb",
"setupCommands": [

View File

@@ -29,7 +29,7 @@
- [ ] toybox support
- [ ] ota support
- [ ] lvgl support
- [ ] modubs support
- [ ] modbus support
#### must low prio
- [ ] AT protocol support

View File

@@ -50,8 +50,8 @@ CONFIG_PAGE_SHIFT=9
CONFIG_MMU=n
CONFIG_KNL_TEST=n
CONFIG_ELF_LAUNCH=n
CONFIG_THREAD_MSG_BUG_LEN=128
CONFIG_THREAD_IPC_MSG_LEN=24
CONFIG_THREAD_MSG_BUG_LEN=256
CONFIG_THREAD_IPC_MSG_LEN=56
CONFIG_THREAD_MAP_BUF_LEN=4
CONFIG_THREAD_USER_BUF_LEN=4
CONFIG_MSG_BUF_VADDR=0xE0000000

11
mkrtos_doc/booting.md Normal file
View File

@@ -0,0 +1,11 @@
image文件打包分布
bootstrap(bootloader) --> dtbo(设备树) --> knl image --> appfs image
appfs image
--> init server
--> block server
--> appfs server
系统加载流程:
bootstrap --> knl image --> init server --> block server --> appfs server --> other application.

View File

@@ -3,6 +3,8 @@
#include "thread.h"
#include "futex.h"
#include "thread_knl.h"
#include "sleep.h"
static umword_t sys_tick_cnt;
umword_t sys_tick_cnt_get(void)
@@ -19,7 +21,10 @@ void SysTick_Handler(void)
atomic_inc(&thread_get_current()->time_count);
}
sys_tick_cnt++;
#if 0
thread_timeout_check(1);
#endif
thread_check_timeout();
futex_timeout_times_tick();
cpulock_set(status);
thread_calc_cpu_usage();

View File

@@ -32,6 +32,7 @@ typedef struct task
void *nofity_point; //!< commint point func.
addr_t nofity_stack; //!< nofity_point_stack.
mutex_t nofity_lock;
sema_t notify_sema; //!< sema
addr_t nofity_msg_buf; //!<
umword_t *nofity_map_buf;
umword_t *nofity_bitmap; //!<

View File

@@ -15,7 +15,8 @@
static slab_t *sema_slab;
#endif
enum SEMA_OP {
enum SEMA_OP
{
SEMA_UP,
SEMA_DOWN,
};
@@ -28,7 +29,8 @@ static void sema_mem_init(void)
#endif
}
INIT_KOBJ_MEM(sema_mem_init);
typedef struct sema_wait_item {
typedef struct sema_wait_item
{
slist_head_t node;
thread_t *thread;
} sema_wait_item_t;
@@ -45,29 +47,47 @@ void sema_up(sema_t *obj)
umword_t status;
status = spinlock_lock(&obj->lock);
if (slist_is_empty(&obj->suspend_head)) {
if (obj->cnt < obj->max_cnt) {
if (slist_is_empty(&obj->suspend_head))
{
if (obj->cnt < obj->max_cnt)
{
obj->cnt++;
}
// printk("up0 sema cnt:%d max:%d.\n", obj->cnt, obj->max_cnt);
} else {
}
else
{
slist_head_t *first_wait_node;
sema_wait_item_t *first_wait;
first_wait_node = slist_first(&obj->suspend_head);
first_wait = container_of(first_wait_node, sema_wait_item_t, node);
// assert(first_wait->thread->status == THREAD_SUSPEND);
if (thread_get_status(first_wait->thread) == THREAD_SUSPEND)
{
slist_del(first_wait_node);
if (ref_counter_dec_and_release(&first_wait->thread->ref, &first_wait->thread->kobj) != 1) {
if (ref_counter_dec_and_release(&first_wait->thread->ref, &first_wait->thread->kobj) != 1)
{
// thread_ready_remote(first_wait->thread, FALSE);
thread_sleep_del_and_wakeup(first_wait->thread);
}
if (obj->cnt < obj->max_cnt) {
}
else
{
// 超时退出但是切出来的时候切到了唤醒线程中所以这里不是suspend状态。
thread_sleep_del(first_wait->thread);
}
if (obj->cnt < obj->max_cnt)
{
obj->cnt++;
}
// printk("up1 sema cnt:%d max:%d.\n", obj->cnt, obj->max_cnt);
}
spinlock_set(&obj->lock, status);
if (cpulock_get_status())
{
preemption();
}
}
umword_t sema_down(sema_t *obj, umword_t ticks)
{
@@ -79,25 +99,31 @@ umword_t sema_down(sema_t *obj, umword_t ticks)
again:
status = spinlock_lock(&obj->lock);
if (obj->cnt == 0) {
if (obj->cnt == 0)
{
sema_wait_item_init(&wait_item, th);
ref_counter_inc(&th->ref);
slist_add_append(&obj->suspend_head, &wait_item.node);
remain_sleep = thread_sleep(ticks);
if (remain_sleep == 0 && ticks != 0) {
if (remain_sleep == 0 && ticks != 0)
{
// 超时退出的,直接从列表中删除
assert(slist_in_list(&wait_item.node));
slist_del(&wait_item.node);
ref_counter_dec(&th->ref);
}
if (!(remain_sleep == 0 && ticks != 0)) {
else
{
spinlock_set(&obj->lock, status);
if (cpulock_get_status()) {
if (cpulock_get_status())
{
preemption();
}
goto again;
}
} else {
}
else
{
assert(obj->cnt > 0);
obj->cnt--;
// printk("down sema cnt:%d max:%d.\n", obj->cnt, obj->max_cnt);
@@ -113,16 +139,21 @@ static void sema_syscall(kobject_t *kobj, syscall_prot_t sys_p,
msg_tag_t tag = msg_tag_init4(0, 0, 0, -EINVAL);
task_t *task = thread_get_current_task();
if (sys_p.prot != SEMA_PROT) {
if (sys_p.prot != SEMA_PROT)
{
f->regs[0] = msg_tag_init4(0, 0, 0, -EPROTO).raw;
return;
}
switch (sys_p.op) {
case SEMA_UP: {
switch (sys_p.op)
{
case SEMA_UP:
{
sema_up(sema);
tag = msg_tag_init4(0, 0, 0, 0);
} break;
case SEMA_DOWN: {
}
break;
case SEMA_DOWN:
{
umword_t ret;
ret = sema_down(sema, f->regs[0]);
@@ -142,7 +173,8 @@ static sema_t *sema_create(ram_limit_t *lim, umword_t cnt, umword_t max)
#else
kobj = mm_limit_alloc(lim, sizeof(sema_t));
#endif
if (!kobj) {
if (!kobj)
{
return NULL;
}
sema_init(kobj, cnt, max);
@@ -166,10 +198,12 @@ static void sema_release_stage1(kobject_t *kobj)
first_wait_node = slist_first(&obj->suspend_head);
first_wait = container_of(first_wait_node, sema_wait_item_t, node);
slist_del(first_wait_node);
if (ref_counter_dec_and_release(&first_wait->thread->ref, &first_wait->thread->kobj) != 1) {
if (ref_counter_dec_and_release(&first_wait->thread->ref, &first_wait->thread->kobj) != 1)
{
thread_ready_remote(first_wait->thread, FALSE);
}
if (obj->cnt < obj->max_cnt) {
if (obj->cnt < obj->max_cnt)
{
obj->cnt++;
}
}
@@ -188,13 +222,16 @@ static void sema_release_stage2(kobject_t *kobj)
void sema_init(sema_t *obj, int cnt, int max)
{
if (max <= 0) {
if (max <= 0)
{
max = 1;
}
if (cnt < 0) {
if (cnt < 0)
{
cnt = 0;
}
if (cnt > max) {
if (cnt > max)
{
cnt = max;
}
obj->cnt = cnt;
@@ -213,7 +250,8 @@ static kobject_t *sema_func(ram_limit_t *lim, umword_t arg0, umword_t arg1,
umword_t arg2, umword_t arg3)
{
sema_t *sema = sema_create(lim, arg0, arg1);
if (!sema) {
if (!sema)
{
return NULL;
}
return &sema->kobj;

View File

@@ -20,6 +20,7 @@
#include "mpu.h"
#include "init.h"
#include "dlist.h"
#include "printk.h"
#if IS_ENABLED(CONFIG_BUDDY_SLAB)
#include <slab.h>
static slab_t *share_mem_slab;
@@ -261,6 +262,7 @@ static int share_mem_alloc_pmem(share_mem_t *obj)
{
return -ENOMEM;
}
printk("share mem:[0x%x 0x%x]\n", obj->mem, (char *)obj->mem + obj->size);
memset(obj->mem, 0, obj->size);
break;
case SHARE_MEM_CNT_DPD:
@@ -464,7 +466,7 @@ static void share_mem_release_stage2(kobject_t *kobj)
{
share_mem_t *sm = container_of(kobj, share_mem_t, kobj);
assert(dlist_is_empty(&sm->task_head));//TODO:有bug
assert(dlist_is_empty(&sm->task_head)); // TODO:有bug
#if IS_ENABLED(CONFIG_MMU)
share_mem_free_pmem(sm);

View File

@@ -53,8 +53,8 @@ void thread_check_timeout(void)
if (pos->times == 0)
{
assert(pos->th->status == THREAD_SUSPEND);
thread_ready(pos->th, TRUE);
slist_del(&pos->node);
thread_ready(pos->th, TRUE);
}
} // !< 如果是0则一直休眠
pos = next;
@@ -74,7 +74,7 @@ thread_wait_entry_t *thread_sleep_del(thread_t *th)
pos, (slist_head_t *)&wait_list,
node)
{
assert(pos->th->status == THREAD_SUSPEND);
// assert(pos->th->status == THREAD_SUSPEND);
thread_wait_entry_t *next = slist_next_entry(
pos, (slist_head_t *)wait_list,
node);
@@ -119,6 +119,5 @@ umword_t thread_sleep(umword_t tick)
thread_suspend(cur_th);
spinlock_set(&lock, status);
preemption();
return entry.times;
}

View File

@@ -432,6 +432,7 @@ static void task_syscall_func(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t i
tag_task->nofity_bitmap_len = (f->regs[4]);
tag_task->nofity_msg_buf = (addr_t)f->regs[5];
tag_task->nofity_map_buf = (umword_t *)((addr_t)f->regs[5] + THREAD_MSG_BUG_LEN);
sema_init(&tag_task->notify_sema, tag_task->nofity_bitmap_len, tag_task->nofity_bitmap_len);
tag = msg_tag_init4(0, 0, 0, 0);
}
break;
@@ -578,7 +579,7 @@ task_t *task_create(ram_limit_t *lim, int is_knl)
return NULL;
}
task_init(tk, lim, is_knl);
// printk("create task is 0x%x\n", tk);
printk("create task is 0x%x\n", tk);
return tk;
}

View File

@@ -432,6 +432,7 @@ void thread_ready_remote(thread_t *th, bool_t is_sche)
*/
void thread_ready(thread_t *th, bool_t is_sche)
{
assert(th);
bool_t ret;
umword_t status = cpulock_lock();
@@ -1198,22 +1199,12 @@ msg_tag_t thread_fast_ipc_call(thread_t *to_th, entry_frame_t *f, umword_t user_
printk("task:0x%x, notify point is not set.\n", to_task);
return msg_tag_init4(0, 0, 0, -EIO);
}
_to_unlock:
if (GET_LSB(*to_task->nofity_bitmap, to_task->nofity_bitmap_len) == GET_LSB((~0ULL), to_task->nofity_bitmap_len))
{
thread_sched(TRUE); /*TODO:应该挂起,并在释放时唤醒*/
preemption();
goto _to_unlock;
}
sema_down(&to_task->notify_sema, 0);
mutex_lock(&to_task->nofity_lock, 0);
if (GET_LSB(*to_task->nofity_bitmap, to_task->nofity_bitmap_len) == GET_LSB((~0ULL), to_task->nofity_bitmap_len))
{
mutex_unlock(&to_task->nofity_lock);
thread_sched(TRUE); /*TODO:应该挂起,并在释放时唤醒*/
preemption();
goto _to_unlock;
}
umword_t cpu_status = cpulock_lock();
assert(cur_th->magic == THREAD_MAGIC);
to_task = thread_get_bind_task(to_th);
ref_counter_inc((&to_task->ref_cn));
//!< 执行目标线程时用的是当前线程的资源,这里还需要备份当前线程的上下文。
ret = thread_fast_ipc_save(cur_th, to_task, (void *)(to_task->nofity_stack - 4 * 8 /*FIXME:改成宏*/)); //!< 备份栈和usp
@@ -1252,6 +1243,7 @@ _to_unlock:
pf_t *cur_pf = ((pf_t *)((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE + 8)) - 1;
// 重新设置r9寄存器
cur_pf->regs[5] = (umword_t)(to_task->mm_space.mm_block);
cur_th->sp.user_sp = cur_pf;
//! 寄存器传参数
f->regs[0] = in_tag.raw;
@@ -1274,12 +1266,14 @@ _to_unlock:
{
ref_counter_dec_and_release(&to_task->ref_cn, &to_task->kobj);
mutex_unlock(&to_task->nofity_lock);
sema_up(&to_task->notify_sema);
}
}
else
{
ref_counter_dec_and_release(&to_task->ref_cn, &to_task->kobj);
mutex_unlock(&to_task->nofity_lock);
sema_up(&to_task->notify_sema);
}
cpulock_set(cpu_status);
@@ -1293,6 +1287,7 @@ msg_tag_t thread_fast_ipc_replay(entry_frame_t *f)
task_t *old_task = thread_get_bind_task(cur_th);
int ret;
assert(cur_th->magic == THREAD_MAGIC);
*(cur_task->nofity_bitmap) &= ~(1 << MIN(f->regs[2], cur_task->nofity_bitmap_len)); //!< 解锁bitmap
slist_del(&cur_th->com->fast_ipc_node); // 从链表中删除
@@ -1347,6 +1342,7 @@ msg_tag_t thread_fast_ipc_replay(entry_frame_t *f)
in_tag = msg_tag_init4(0, 0, 0, ret);
}
cpulock_set(cpu_status);
sema_up(&old_task->notify_sema);
if (thread_is_knl(cur_th))
{
arch_to_sche();

View File

@@ -272,7 +272,7 @@ bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl)
task_t *task = container_of(kill_thread->task, task_t, kobj);
if (!is_knl)
{
printk("kill %s task:0x%x, pid:%d\n", kobject_get_name(&task->kobj), task, task->pid);
printk("kill %s th:0x%x task:0x%x, pid:%d\n", kobject_get_name(&task->kobj), kill_thread, task, task->pid);
umword_t status2;
status2 = spinlock_lock(&del_lock);

View File

@@ -1,10 +1,10 @@
#!/bin/bash
export TOOLCHAIN=/home/zhangzheng/toolchain/gcc-arm-10.3-2021.07-x86_64-arm-none-eabi/bin/
export TOOLCHAIN_LIB=/home/zhangzheng/toolchain/gcc-arm-10.3-2021.07-x86_64-arm-none-eabi/lib/gcc/arm-none-eabi/10.3.1/thumb/v7-m/nofp
# export TOOLCHAIN=/home/zhangzheng/toolchain/gcc-arm-10.3-2021.07-x86_64-arm-none-eabi/bin/
# export TOOLCHAIN_LIB=/home/zhangzheng/toolchain/gcc-arm-10.3-2021.07-x86_64-arm-none-eabi/lib/gcc/arm-none-eabi/10.3.1/thumb/v7-m/nofp
# export TOOLCHAIN=/Users/zhangzheng/gcc-arm-none-eabi-10.3-2021.10/bin/
# export TOOLCHAIN_LIB=/Users/zhangzheng/gcc-arm-none-eabi-10.3-2021.10/lib/gcc/arm-none-eabi/10.3.1/thumb/v7-m/nofp
export TOOLCHAIN=/Users/zhangzheng/gcc-arm-none-eabi-10.3-2021.10/bin/
export TOOLCHAIN_LIB=/Users/zhangzheng/gcc-arm-none-eabi-10.3-2021.10/lib/gcc/arm-none-eabi/10.3.1/thumb/v7-m/nofp
# export TOOLCHAIN=/d/GNUArmEmbeddedToolchain/102021.10/bin/
# export TOOLCHAIN_LIB=/d/GNUArmEmbeddedToolchain/102021.10/lib/gcc/arm-none-eabi/10.3.1/thumb/v7-m/nofp

View File

@@ -43,6 +43,9 @@
#define MEMP_NUM_TCP_SEG 64 // MEMP_NUM_TCP_SEG:最多同时在队列中的TCP段数量
#define MEMP_NUM_SYS_TIMEOUT 4 // MEMP_NUM_SYS_TIMEOUT:能够同时激活的timeout个数
#define LWIP_NETCONN_SEM_PER_THREAD 0
#define LWIP_NETIF_LOOPBACK 1
#define LWIP_HAVE_LOOPIF 1
#define LWIP_LOOPBACK_MAX_PBUFS 10
// pbuf选项
#define PBUF_POOL_SIZE 32 // PBUF_POOL_SIZE:pbuf内存池个数
#define PBUF_POOL_BUFSIZE 1600 // PBUF_POOL_BUFSIZE:每个pbuf内存池大小

View File

@@ -119,7 +119,8 @@ static int lwprot_count = 0;
static struct sys_thread *threads = NULL;
static u_mutex_t threads_mutex;
struct sys_mbox_msg {
struct sys_mbox_msg
{
struct sys_mbox_msg *next;
void *msg;
};
@@ -129,16 +130,17 @@ static AUTO_CALL(101) void init_mutex(void)
obj_handler_t mtx;
#if SYS_LIGHTWEIGHT_PROT
mtx = handler_alloc();
assert(mtx == HANDLER_INVALID);
assert(mtx != HANDLER_INVALID);
u_mutex_init(&lwprot_mutex, mtx);
#endif
mtx = handler_alloc();
assert(mtx == HANDLER_INVALID);
assert(mtx != HANDLER_INVALID);
u_mutex_init(&threads_mutex, mtx);
}
#define SYS_MBOX_SIZE 128
struct sys_mbox {
struct sys_mbox
{
int first, last;
void *msgs[SYS_MBOX_SIZE];
struct sys_sem *not_empty;
@@ -147,16 +149,19 @@ struct sys_mbox {
int wait_send;
};
struct sys_sem {
struct sys_sem
{
obj_handler_t sem;
};
struct sys_mutex {
struct sys_mutex
{
// pthread_mutex_t mutex;
u_mutex_t mutex;
};
struct sys_thread {
struct sys_thread
{
struct sys_thread *next;
pthread_t pthread;
obj_handler_t obj_th;
@@ -195,7 +200,8 @@ introduce_thread(pthread_t id)
thread = (struct sys_thread *)malloc(sizeof(struct sys_thread));
if (thread != NULL) {
if (thread != NULL)
{
sys_thread_set_private_data(pthread_hd_get(id));
u_mutex_lock(&threads_mutex, 0, NULL);
thread->next = threads;
@@ -207,7 +213,8 @@ introduce_thread(pthread_t id)
return thread;
}
struct thread_wrapper_data {
struct thread_wrapper_data
{
lwip_thread_fn function;
void *arg;
};
@@ -247,11 +254,13 @@ sys_thread_new(const char *name, lwip_thread_fn function, void *arg, int stacksi
pthread_setname_np(tmp, name);
#endif
if (0 == code) {
if (0 == code)
{
st = introduce_thread(tmp);
}
if (NULL == st) {
if (NULL == st)
{
LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_new: pthread_create %d, st = 0x%lx\n",
code, (unsigned long)st));
abort();
@@ -271,10 +280,10 @@ void sys_unlock_tcpip_core(void)
}
#endif /* LWIP_TCPIP_CORE_LOCKING */
static pthread_t lwip_tcpip_thread_id;
// static pthread_t lwip_tcpip_thread_id;
void sys_mark_tcpip_thread(void)
{
lwip_tcpip_thread_id = pthread_self();
// lwip_tcpip_thread_id = pthread_self();
}
void sys_check_core_locking(void)
@@ -301,7 +310,8 @@ err_t sys_mbox_new(struct sys_mbox **mb, int size)
LWIP_UNUSED_ARG(size);
mbox = (struct sys_mbox *)malloc(sizeof(struct sys_mbox));
if (mbox == NULL) {
if (mbox == NULL)
{
return ERR_MEM;
}
mbox->first = mbox->last = 0;
@@ -317,7 +327,8 @@ err_t sys_mbox_new(struct sys_mbox **mb, int size)
void sys_mbox_free(struct sys_mbox **mb)
{
if ((mb != NULL) && (*mb != SYS_MBOX_NULL)) {
if ((mb != NULL) && (*mb != SYS_MBOX_NULL))
{
struct sys_mbox *mbox = *mb;
SYS_STATS_DEC(mbox.used);
sys_arch_sem_wait(&mbox->mutex, 0);
@@ -343,22 +354,27 @@ err_t sys_mbox_trypost(struct sys_mbox **mb, void *msg)
LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_trypost: mbox %p msg %p\n",
(void *)mbox, (void *)msg));
if ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
if ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE))
{
sys_sem_signal(&mbox->mutex);
return ERR_MEM;
}
mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;
if (mbox->last == mbox->first) {
if (mbox->last == mbox->first)
{
first = 1;
} else {
}
else
{
first = 0;
}
mbox->last++;
if (first) {
if (first)
{
sys_sem_signal(&mbox->not_empty);
}
@@ -383,7 +399,8 @@ void sys_mbox_post(struct sys_mbox **mb, void *msg)
LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_post: mbox %p msg %p\n", (void *)mbox, (void *)msg));
while ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
while ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE))
{
mbox->wait_send++;
sys_sem_signal(&mbox->mutex);
sys_arch_sem_wait(&mbox->not_full, 0);
@@ -393,15 +410,19 @@ void sys_mbox_post(struct sys_mbox **mb, void *msg)
mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;
if (mbox->last == mbox->first) {
if (mbox->last == mbox->first)
{
first = 1;
} else {
}
else
{
first = 0;
}
mbox->last++;
if (first) {
if (first)
{
sys_sem_signal(&mbox->not_empty);
}
@@ -416,21 +437,26 @@ u32_t sys_arch_mbox_tryfetch(struct sys_mbox **mb, void **msg)
sys_arch_sem_wait(&mbox->mutex, 0);
if (mbox->first == mbox->last) {
if (mbox->first == mbox->last)
{
sys_sem_signal(&mbox->mutex);
return SYS_MBOX_EMPTY;
}
if (msg != NULL) {
if (msg != NULL)
{
LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p msg %p\n", (void *)mbox, *msg));
*msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
} else {
}
else
{
LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p, null msg\n", (void *)mbox));
}
mbox->first++;
if (mbox->wait_send) {
if (mbox->wait_send)
{
sys_sem_signal(&mbox->not_full);
}
@@ -450,34 +476,43 @@ u32_t sys_arch_mbox_fetch(struct sys_mbox **mb, void **msg, u32_t timeout)
stuff here. */
sys_arch_sem_wait(&mbox->mutex, 0);
while (mbox->first == mbox->last) {
while (mbox->first == mbox->last)
{
sys_sem_signal(&mbox->mutex);
/* We block while waiting for a mail to arrive in the mailbox. We
must be prepared to timeout. */
if (timeout != 0) {
if (timeout != 0)
{
time_needed = sys_arch_sem_wait(&mbox->not_empty, timeout);
if (time_needed == SYS_ARCH_TIMEOUT) {
if (time_needed == SYS_ARCH_TIMEOUT)
{
return SYS_ARCH_TIMEOUT;
}
} else {
}
else
{
sys_arch_sem_wait(&mbox->not_empty, 0);
}
sys_arch_sem_wait(&mbox->mutex, 0);
}
if (msg != NULL) {
if (msg != NULL)
{
LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p msg %p\n", (void *)mbox, *msg));
*msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
} else {
}
else
{
LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p, null msg\n", (void *)mbox));
}
mbox->first++;
if (mbox->wait_send) {
if (mbox->wait_send)
{
sys_sem_signal(&mbox->not_full);
}
@@ -494,16 +529,19 @@ sys_sem_new_internal(u8_t count)
struct sys_sem *sem;
sem = (struct sys_sem *)malloc(sizeof(struct sys_sem));
if (sem != NULL) {
if (sem != NULL)
{
sem->sem = handler_alloc();
if (sem->sem == HANDLER_INVALID) {
if (sem->sem == HANDLER_INVALID)
{
free(sem);
return NULL;
}
msg_tag_t tag;
tag = facotry_create_sema(FACTORY_PROT, vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, sem->sem), 0, count);
if (msg_tag_get_val(tag) < 0) {
tag = facotry_create_sema(FACTORY_PROT, vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, sem->sem), count, INT_MAX);
if (msg_tag_get_val(tag) < 0)
{
handler_free(sem->sem);
free(sem);
return NULL;
@@ -516,7 +554,8 @@ err_t sys_sem_new(struct sys_sem **sem, u8_t count)
{
SYS_STATS_INC_USED(sem);
*sem = sys_sem_new_internal(count);
if (*sem == NULL) {
if (*sem == NULL)
{
return ERR_MEM;
}
return ERR_OK;
@@ -526,21 +565,31 @@ u32_t sys_arch_sem_wait(struct sys_sem **s, u32_t timeout)
{
umword_t time_needed = 0;
struct sys_sem *sem;
msg_tag_t tag;
LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
sem = *s;
u_sema_down(sem->sem, timeout, &time_needed);
tag = u_sema_down(sem->sem, timeout, &time_needed);
assert(msg_tag_get_val(tag) >= 0);
if (time_needed == 0 && timeout != 0)
{
return SYS_ARCH_TIMEOUT;
}
return (u32_t)(timeout - time_needed);
}
void sys_sem_signal(struct sys_sem **s)
{
struct sys_sem *sem;
msg_tag_t tag;
LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
sem = *s;
u_sema_up(sem->sem);
tag = u_sema_up(sem->sem);
assert(msg_tag_get_val(tag) >= 0);
}
static void
@@ -552,7 +601,8 @@ sys_sem_free_internal(struct sys_sem *sem)
void sys_sem_free(struct sys_sem **sem)
{
if ((sem != NULL) && (*sem != SYS_SEM_NULL)) {
if ((sem != NULL) && (*sem != SYS_SEM_NULL))
{
SYS_STATS_DEC(sem.used);
sys_sem_free_internal(*sem);
}
@@ -568,22 +618,27 @@ err_t sys_mutex_new(struct sys_mutex **mutex)
struct sys_mutex *mtx;
mtx = (struct sys_mutex *)malloc(sizeof(struct sys_mutex));
if (mtx != NULL) {
if (mtx != NULL)
{
obj_handler_t mutex_obj;
mutex_obj = handler_alloc();
if (mutex_obj == HANDLER_INVALID) {
if (mutex_obj == HANDLER_INVALID)
{
free(mtx);
return ERR_MEM;
}
if (u_mutex_init(&mtx->mutex, mutex_obj) < 0) {
if (u_mutex_init(&mtx->mutex, mutex_obj) < 0)
{
handler_free(mutex_obj);
free(mtx);
return ERR_MEM;
}
*mutex = mtx;
return ERR_OK;
} else {
}
else
{
return ERR_MEM;
}
}
@@ -592,7 +647,6 @@ err_t sys_mutex_new(struct sys_mutex **mutex)
* @param mutex the mutex to lock */
void sys_mutex_lock(struct sys_mutex **mutex)
{
// pthread_mutex_lock(&((*mutex)->mutex));
u_mutex_lock(&((*mutex)->mutex), 0, NULL);
}
@@ -600,7 +654,6 @@ void sys_mutex_lock(struct sys_mutex **mutex)
* @param mutex the mutex to unlock */
void sys_mutex_unlock(struct sys_mutex **mutex)
{
// pthread_mutex_unlock(&((*mutex)->mutex));
u_mutex_unlock(&((*mutex)->mutex));
}
@@ -623,7 +676,8 @@ sys_thread_sem_free(void *data)
{
sys_sem_t *sem = (sys_sem_t *)(data);
if (sem) {
if (sem)
{
sys_sem_free(sem);
free(sem);
}
@@ -649,7 +703,8 @@ sys_sem_t *
sys_arch_netconn_sem_get(void)
{
sys_sem_t *sem = (sys_sem_t *)pthread_getspecific(sys_thread_sem_key);
if (!sem) {
if (!sem)
{
sem = sys_thread_sem_alloc();
}
LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_sem_get s=%p\n", (void *)sem));
@@ -729,14 +784,17 @@ sys_arch_protect(void)
/* Note that for the UNIX port, we are using a lightweight mutex, and our
* own counter (which is locked by the mutex). The return code is not actually
* used. */
if (lwprot_thread != sys_thread_get_private_data_self()) {
// printf("thread:%d\n", lwprot_thread);
if (lwprot_thread != sys_thread_get_private_data_self())
{
/* We are locking the mutex where it has not been locked before *
* or is being locked by another thread */
// pthread_mutex_lock(&lwprot_mutex);
u_mutex_lock(&lwprot_mutex, 0, NULL);
lwprot_thread = sys_thread_get_private_data_self();
lwprot_count = 1;
} else
}
else
/* It is already locked by THIS thread */
lwprot_count++;
return 0;
@@ -752,9 +810,11 @@ an operating system.
void sys_arch_unprotect(sys_prot_t pval)
{
LWIP_UNUSED_ARG(pval);
if (lwprot_thread == sys_thread_get_private_data_self()) {
if (lwprot_thread == sys_thread_get_private_data_self())
{
lwprot_count--;
if (lwprot_count == 0) {
if (lwprot_count == 0)
{
lwprot_thread = (umword_t)0xDEAD;
// pthread_mutex_unlock(&lwprot_mutex);
u_mutex_unlock(&lwprot_mutex);

View File

@@ -102,6 +102,9 @@ struct sockaddr_in6 {
struct sockaddr {
u8_t sa_len;
#ifdef MKRTOS
unsigned char is_null;
#endif
sa_family_t sa_family;
char sa_data[14];
};

View File

@@ -369,6 +369,7 @@ struct linger {
struct sockaddr {
#ifdef MKRTOS
unsigned char sa_len;
unsigned char is_null;
#endif
sa_family_t sa_family;
char sa_data[14];

View File

@@ -18,4 +18,4 @@ msg_tag_t factory_create_thread(obj_handler_t obj, vpage_t vpage);
msg_tag_t factory_create_thread_vcpu(obj_handler_t obj, vpage_t vpage);
msg_tag_t factory_create_task(obj_handler_t obj, vpage_t vpage);
msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, share_mem_type_t mem_type, umword_t size);
msg_tag_t facotry_create_sema(obj_handler_t obj, vpage_t vpage, int cnt, uint32_t max);
msg_tag_t facotry_create_sema(obj_handler_t obj, vpage_t vpage, int cnt, int max);

View File

@@ -105,7 +105,7 @@ msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, share_mem_t
return tag;
}
msg_tag_t facotry_create_sema(obj_handler_t obj, vpage_t vpage, int cnt, uint32_t max)
msg_tag_t facotry_create_sema(obj_handler_t obj, vpage_t vpage, int cnt, int max)
{
register volatile umword_t r0 asm(ARCH_REG_0);

View File

@@ -24,6 +24,7 @@ int u_mutex_init(u_mutex_t *lock, obj_handler_t sema_hd)
{
return msg_tag_get_val(tag);
}
lock->obj = sema_hd;
return 0;
}
void u_mutex_lock(u_mutex_t *lock, umword_t timeout, umword_t *remain_times)

View File

@@ -64,7 +64,7 @@ int net_accept(sd_t s, struct sockaddr *addr, socklen_t *addrlen)
}
*addr = rpc_addr.data;
*addrlen = rpc_addrlen.data;
return msg_tag_get_val(tag);
return mk_sd_init2(hd, msg_tag_get_val(tag)).raw;
}
RPC_GENERATION_CALL3(net_t, NET_PROT, NET_BIND, bind,
rpc_int_t, rpc_int_t, RPC_DIR_IN, RPC_TYPE_DATA, s,
@@ -345,7 +345,7 @@ int net_recv(int s, void *mem, size_t len, int flags)
.data = mem + rlen,
// .len = MIN(len, sizeof(rpc_mem.data)),
};
r_once_len = MIN(sizeof(rpc_mem.data), len - rlen);
r_once_len = MIN(128, len - rlen);
rpc_mem.len = r_once_len;
rpc_size_t_t rpc_len = {
.data = r_once_len,
@@ -363,7 +363,7 @@ int net_recv(int s, void *mem, size_t len, int flags)
}
}
return msg_tag_get_val(tag);
return rlen;
}
RPC_GENERATION_CALL6(net_t, NET_PROT, NET_RECVFROM, recvfrom,
rpc_int_t, rpc_int_t, RPC_DIR_IN, RPC_TYPE_DATA, s,
@@ -375,7 +375,7 @@ RPC_GENERATION_CALL6(net_t, NET_PROT, NET_RECVFROM, recvfrom,
int net_recvfrom(int s, void *mem, size_t len, int flags,
struct sockaddr *from, socklen_t *fromlen)
{
if (mem == NULL || from == NULL || fromlen == NULL)
if (mem == NULL)
{
return -EINVAL;
}
@@ -390,12 +390,20 @@ int net_recvfrom(int s, void *mem, size_t len, int flags,
.data = flags,
};
rpc_socketaddr_t_t rpc_form = {
.data = *from,
};
rpc_socklen_t_t rpc_fromlen = {
.data = *fromlen,
};
rpc_socketaddr_t_t rpc_form = {};
if (from)
{
rpc_form.data = *from;
}
else
{
rpc_form.data.is_null = 1;
}
rpc_socklen_t_t rpc_fromlen = {};
if (fromlen)
{
rpc_fromlen.data = *fromlen;
}
msg_tag_t tag;
int rlen = 0;
@@ -407,7 +415,7 @@ int net_recvfrom(int s, void *mem, size_t len, int flags,
.data = mem + rlen,
// .len = MIN(len, sizeof(rpc_mem.data)),
};
r_once_len = MIN(sizeof(rpc_mem.data), len - rlen);
r_once_len = MIN(128, len - rlen);
rpc_mem.len = r_once_len;
rpc_size_t_t rpc_len = {
.data = r_once_len,
@@ -425,7 +433,7 @@ int net_recvfrom(int s, void *mem, size_t len, int flags,
}
}
return msg_tag_get_val(tag);
return rlen;
}
RPC_GENERATION_CALL4(net_t, NET_PROT, NET_SEND, send,
rpc_int_t, rpc_int_t, RPC_DIR_IN, RPC_TYPE_DATA, s,
@@ -460,7 +468,7 @@ int net_send(int s, const void *dataptr, size_t size, int flags)
.data = (void *)dataptr + rlen,
// .len = MIN(len, sizeof(rpc_mem.data)),
};
w_once_len = MIN(sizeof(rpc_mem.data), size - rlen);
w_once_len = MIN(128, size - rlen);
rpc_mem.len = w_once_len;
rpc_size_t_t rpc_len = {
.data = w_once_len,
@@ -478,7 +486,7 @@ int net_send(int s, const void *dataptr, size_t size, int flags)
}
}
return msg_tag_get_val(tag);
return rlen;
}
RPC_GENERATION_CALL6(net_t, NET_PROT, NET_SENDTO, sendto,
rpc_int_t, rpc_int_t, RPC_DIR_IN, RPC_TYPE_DATA, s,
@@ -490,7 +498,7 @@ RPC_GENERATION_CALL6(net_t, NET_PROT, NET_SENDTO, sendto,
int net_sendto(int s, const void *dataptr, size_t size, int flags,
const struct sockaddr *to, socklen_t tolen)
{
if (dataptr == NULL || to == NULL)
if (dataptr == NULL)
{
return -EINVAL;
}
@@ -504,9 +512,15 @@ int net_sendto(int s, const void *dataptr, size_t size, int flags,
rpc_int_t rpc_flags = {
.data = flags,
};
rpc_socketaddr_t_t rpc_to = {
.data = *to,
};
rpc_socketaddr_t_t rpc_to = {};
if (to)
{
rpc_to.data = *to;
}
else
{
rpc_to.data.is_null = 1;
}
rpc_socklen_t_t rpc_tolen = {
.data = tolen,
};
@@ -520,7 +534,7 @@ int net_sendto(int s, const void *dataptr, size_t size, int flags,
.data = (void *)(dataptr + rlen),
// .len = MIN(len, sizeof(rpc_mem.data)),
};
w_once_len = MIN(sizeof(rpc_mem.data), size - rlen);
w_once_len = MIN(128, size - rlen);
rpc_mem.len = w_once_len;
rpc_size_t_t rpc_len = {
.data = w_once_len,
@@ -538,7 +552,7 @@ int net_sendto(int s, const void *dataptr, size_t size, int flags,
}
}
return msg_tag_get_val(tag);
return rlen;
}
RPC_GENERATION_CALL3(net_t, NET_PROT, NET_SOCKET, socket,
rpc_int_t, rpc_int_t, RPC_DIR_IN, RPC_TYPE_DATA, s,

View File

@@ -192,7 +192,26 @@ RPC_GENERATION_OP_DISPATCH6(net_t, NET_PROT, NET_RECVFROM, recvfrom,
{
return -ENOSYS;
}
ret = obj->op->recvfrom(s->data, mem->data, len->data, flags->data, &from->data, &fromlen->data);
socketaddr_t *p_from;
socklen_t *p_fromlen;
if (from->data.is_null == 1)
{
p_from = NULL;
}
else
{
p_from = &from->data;
}
if (!p_from)
{
p_fromlen = NULL;
}
else
{
p_fromlen = &fromlen->data;
}
ret = obj->op->recvfrom(s->data, mem->data, len->data, flags->data, p_from, p_fromlen);
return ret;
}
// ssize_t (*recvmsg)(int s, struct msghdr *message, int flags);
@@ -257,7 +276,17 @@ RPC_GENERATION_OP_DISPATCH6(net_t, NET_PROT, NET_SENDTO, sendto,
{
return -ENOSYS;
}
ret = obj->op->sendto(s->data, mem->data, size->data, flags->data, &to->data, tolen->data);
socketaddr_t *p_to;
if (to->data.is_null == 1)
{
p_to = NULL;
}
else
{
p_to = &to->data;
}
ret = obj->op->sendto(s->data, mem->data, size->data, flags->data, p_to, tolen->data);
return ret;
}
// int (*socket)(int domain, int type, int protocol);

View File

@@ -9,7 +9,7 @@
#include <u_task.h>
#include <u_thread.h>
#include <u_types.h>
#include <stdio.h>
#define MAGIC_NS_USERPID 0xbabababa
int fast_ipc_setsp(int i, void *stack);
@@ -43,8 +43,10 @@ static int fast_ipc_dat_copy(ipc_msg_t *dst_ipc, ipc_msg_t *src_ipc, msg_tag_t t
}
static void update_map_buf(void)
{
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++) {
if (cons_map_buf[i] == 0) {
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++)
{
if (cons_map_buf[i] == 0)
{
cons_map_buf[i] = vpage_create_raw3(0, 0, handler_alloc()).raw; /*TODO:申请失败检查*/
}
}
@@ -59,19 +61,23 @@ static msg_tag_t process_ipc(int j, umword_t obj, long tag)
msg = (ipc_msg_t *)(&cons_msg_buf[j * MSG_BUG_LEN]);
ret_tag = msg_tag_init4(0, 0, 0, -EIO);
svr_obj = (rpc_svr_obj_t *)obj;
if (svr_obj == NULL) {
if (svr_obj == NULL)
{
ret_tag = msg_tag_init4(0, 0, 0, -EACCES);
goto end;
}
if (svr_obj == (void *)MAGIC_NS_USERPID) {
if (svr_obj == (void *)MAGIC_NS_USERPID)
{
/*获取ns的user id*/
svr_obj = meta_find_svr_obj(NS_PROT);
}
if (svr_obj == NULL) {
if (svr_obj == NULL)
{
ret_tag = msg_tag_init4(0, 0, 0, -EACCES);
goto end;
}
if (svr_obj->dispatch) {
if (svr_obj->dispatch)
{
ret_tag = svr_obj->dispatch(svr_obj, msg_tag_init(tag), msg);
}
end:
@@ -79,10 +85,13 @@ end:
}
static void update_map_buf_last(void)
{
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++) {
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++)
{
vpage_t vpage = vpage_create_raw(cons_map_buf[i]);
if (handler_is_used(vpage.addr)) {
if (task_obj_valid(TASK_THIS, vpage.addr, 0).prot == 1) {
if (handler_is_used(vpage.addr))
{
if (task_obj_valid(TASK_THIS, vpage.addr, 0).prot == 1)
{
cons_map_buf[i] = vpage_create_raw3(0, 0, handler_alloc()).raw;
}
}
@@ -96,7 +105,7 @@ static void fast_ipc_goto_process(int j, long tag, umword_t obj, umword_t arg1,
msg = (void *)(&cons_msg_buf[j * MSG_BUG_LEN]);
thread_msg_buf_set(-1, msg);
update_map_buf();
msg->user[3] = cons_thread_th[j];//设置私有变量FIXME: 一种更加通用的方法
msg->user[3] = cons_thread_th[j]; // 设置私有变量FIXME: 一种更加通用的方法
task_com_unlock(TASK_THIS);
ret_tag = process_ipc(j, obj, tag);
task_com_lock(TASK_THIS);
@@ -107,8 +116,10 @@ static void fast_ipc_goto_process(int j, long tag, umword_t obj, umword_t arg1,
static __attribute__((optimize(0))) void fast_ipc_com_point(msg_tag_t tag, umword_t arg0, umword_t arg1, umword_t arg2)
{
int i;
for (i = 0; i < stack_array_nr; i++) {
if ((cons_stack_bitmap & (1 << i)) == 0) {
for (i = 0; i < stack_array_nr; i++)
{
if ((cons_stack_bitmap & (1 << i)) == 0)
{
cons_stack_bitmap |= (1 << i);
break;
}
@@ -126,16 +137,24 @@ int u_fast_ipc_init(uint8_t *stack_array, uint8_t *msg_buf_array, int stack_msgb
cons_msg_buf = msg_buf_array;
cons_thread_th = threads_obj;
ipc_msg_t *msg = (void *)cons_msg_buf_main;
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++) {
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++)
{
cons_map_buf[i] = vpage_create_raw3(0, 0, handler_alloc()).raw;
msg->map_buf[i] = cons_map_buf[i];
}
#if 1
for (int i = 0; i < stack_msgbuf_array_num; i++)
{
printf("stack 0x%x %x\n", stack_array + stack_size * i, stack_size);
}
#endif
msg->user[0] = (umword_t)((char *)fake_pthread + sizeof(fake_pthread));
tag = task_set_com_point(TASK_THIS, &fast_ipc_com_point, (addr_t)com_stack,
sizeof(com_stack), (void *)(&cons_stack_bitmap),
stack_msgbuf_array_num, cons_msg_buf_main);
if (msg_tag_get_val(tag) < 0) {
if (msg_tag_get_val(tag) < 0)
{
return msg_tag_get_val(tag);
}
return msg_tag_get_val(tag);

View File

@@ -1,7 +1,6 @@
#一次读取一行,每行代表启动的应用程序
block /block
appfs -m /bin -d /block
# cpiofs -m /bin
# fatfs
# pin
# i2c

View File

@@ -0,0 +1,28 @@
#include "ns.h"
#include "u_types.h"
// #include "ns_types.h"
// Other processes may register nodes here, including sub-nodes; a sub-node may
// in turn register further sub-nodes of its own.
// Two node kinds can be registered: DUMMY and SVR.
// Only a DUMMY node may have SVR nodes registered inside it.
// Lookup uses longest-service-match: when the last SVR node along the path is
// matched, return that SVR node's handle (HD) together with the position where
// the path was truncated.
#define NS_NODE_NAME_LEN 32
// Kind of a name-service node. Per the design notes above, only a DUMMY
// node may have SVR nodes registered beneath it.
typedef enum node_type
{
    NODE_TYPE_DUMMY, // placeholder/container node; may hold SVR sub-nodes
    NODE_TYPE_SVR,   // server endpoint node; terminal match during lookup
} node_type_t;
// One node of the name-service tree. Nodes are registered by other
// processes and may carry sub-nodes, forming a hierarchy that lookup
// walks with longest-service-match semantics (see notes above).
typedef struct ns_node
{
    char name[NS_NODE_NAME_LEN]; // node name; NOTE(review): assumed NUL-terminated, max 31 chars -- confirm against registration code
    node_type_t type;            // NODE_TYPE_DUMMY or NODE_TYPE_SVR
    struct ns_node *next;        // presumably next sibling at the same tree level -- verify against list code
    struct ns_node *sub;         // presumably head of this node's sub-node (child) list -- verify
    int ref;                     // reference count -- assumed; confirm lifetime rules in ns.c
} ns_node_t;

View File

@@ -0,0 +1,3 @@
#pragma once

View File

@@ -30,16 +30,18 @@ static pthread_t pth1;
static pthread_t pth2;
static pthread_t pth3;
static obj_handler_t sema_hd2;
#define TEST_CN 10
#define TEST_CN 100
static void *thread_th1(void *arg)
{
int j = 0;
while (1) {
while (1)
{
printf("sema_up start\n");
u_sema_up(sema_hd2);
u_sleep_ms(100);
printf("sema_up end\n");
if (j == TEST_CN * 2 + 2) {
if (j == TEST_CN * 2 + 2)
{
break;
}
j++;
@@ -50,12 +52,14 @@ static void *thread_th1(void *arg)
static void *thread_th2(void *arg)
{
int j = 0;
while (1) {
while (1)
{
printf("sema_down start\n");
u_sema_down(sema_hd2, 0, NULL);
u_sleep_ms(50);
printf("sema_down end\n");
if (j == TEST_CN) {
if (j == TEST_CN)
{
break;
}
j++;
@@ -66,12 +70,14 @@ static void *thread_th2(void *arg)
static void *thread_th3(void *arg)
{
int j = 0;
while (1) {
while (1)
{
printf("sema_down2 start\n");
u_sema_down(sema_hd2, 0, NULL);
u_sleep_ms(50);
printf("sema_down2 end\n");
if (j == TEST_CN) {
if (j == TEST_CN)
{
break;
}
j++;
@@ -91,31 +97,42 @@ static void u_sema_test2(CuTest *tc)
CuAssert(tc, "pthread_create fail.\n", pthread_create(&pth1, NULL, thread_th1, NULL) == 0);
CuAssert(tc, "pthread_create fail.\n", pthread_create(&pth2, NULL, thread_th2, NULL) == 0);
CuAssert(tc, "pthread_create fail.\n", pthread_create(&pth3, NULL, thread_th3, NULL) == 0);
if (pth1 != PTHREAD_NULL) {
if (pth1 != PTHREAD_NULL)
{
CuAssert(tc, "pthread_join fail.\n", pthread_join(pth1, NULL) == 0);
}
if (pth2 != PTHREAD_NULL) {
if (pth2 != PTHREAD_NULL)
{
CuAssert(tc, "pthread_join fail.\n", pthread_join(pth2, NULL) == 0);
}
if (pth3 != PTHREAD_NULL) {
if (pth3 != PTHREAD_NULL)
{
CuAssert(tc, "pthread_join fail.\n", pthread_join(pth3, NULL) == 0);
}
}
static void u_sema_test3(CuTest *tc)
{
msg_tag_t tag;
obj_handler_t sema_hd2;
umword_t reamin_times;
for (int j = 0; j < 100; j++)
{
obj_handler_t sema_hd2;
sema_hd2 = handler_alloc();
CuAssert(tc, "hd alloc fail.\n", sema_hd2 != HANDLER_INVALID);
tag = facotry_create_sema(FACTORY_PROT,
vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, sema_hd2), 0, 1);
CuAssert(tc, "hd alloc fail.\n", msg_tag_get_val(tag) >= 0);
for (int i = 0; i < 5; i++) {
tag = u_sema_down(sema_hd2, 100, &reamin_times);
for (int i = 0; i < 5; i++)
{
tag = u_sema_down(sema_hd2, 10, &reamin_times);
CuAssert(tc, "sema down fail.\n", msg_tag_get_val(tag) >= 0);
CuAssert(tc, "sema down fail.\n", reamin_times == 0);
}
handler_free_umap(sema_hd2);
}
}
static CuSuite suite;
CuSuite *sema_test_suite(void)

View File

@@ -64,7 +64,9 @@ static err_t low_level_output(struct netif *netif, struct pbuf *p)
int ret;
memcpy((void *)send_shm_addr, p->payload, p->len);
// printf("start write.\n");
ret = blk_drv_cli_write(net_drv_hd, send_shm_hd, p->len, 0);
// printf("start end.\n");
return ret >= 0 ? ERR_OK : ERR_IF;
}

View File

@@ -1,6 +1,6 @@
#include <u_util.h>
#if !IS_ENABLED(CONFIG_MMU)
#define HEAP_SIZE (32 * 1024)
#define HEAP_SIZE (16 * 1024)
#define STACK_SIZE (2048)
#if defined(__CC_ARM)

View File

@@ -23,7 +23,7 @@ static umword_t addr;
static umword_t size;
obj_handler_t net_drv_hd;
#define STACK_COM_ITME_SIZE (2 * 1024 + 512 /*sizeof(struct pthread) + TP_OFFSET*/)
#define STACK_COM_ITME_SIZE (2 * 1024/*sizeof(struct pthread) + TP_OFFSET*/)
#define STACK_NUM 4
ATTR_ALIGN(8)
static uint8_t stack_coms[STACK_COM_ITME_SIZE * STACK_NUM];
@@ -43,7 +43,8 @@ int main(int args, char *argv[])
int ret;
msg_tag_t tag;
obj_handler_t hd;
task_set_obj_name(TASK_THIS, TASK_THIS, "tk_net");
task_set_obj_name(TASK_THIS, THREAD_MAIN, "th_net");
printf("net startup..\n");
fast_ipc_init();
ret = rpc_meta_init(THREAD_MAIN, &hd);
@@ -97,12 +98,14 @@ again:
return -1;
}
cons_write_str("net mount success\n");
net_test();
// net_test();
while (1) {
if (msg_tag_get_prot(u_sema_down(sem_hd, 0, NULL)) < 0) {
printf("error.\n");
}
// printf("start read.\n");
int ret = blk_drv_cli_read(net_drv_hd, shm_hd, 0, 0);
// printf("end read.\n");
if (ret > 0) {
lwip_pkt_handle_raw((uint8_t *)addr, ret);

View File

@@ -101,7 +101,7 @@ static int tcp_client(void)
// 设置服务器地址
memset(&server_addr, 0, sizeof(server_addr));
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr = inet_addr("192.168.3.10"); // 使用本机IP地址
server_addr.sin_addr.s_addr = inet_addr("127.0.0.1"); // 使用本机IP地址
server_addr.sin_port = htons(PORT);
// 连接到服务器

View File

@@ -10,8 +10,11 @@
#include "mm_test.h"
#include <u_sleep.h>
#include "net_test.h"
#include "u_task.h"
int main(int argc, char *argv[])
{
task_set_obj_name(TASK_THIS, TASK_THIS, "tk_tst");
task_set_obj_name(TASK_THIS, THREAD_MAIN, "th_tst");
for (int i = 0; i < argc; i++)
{
printf("argv[%d]: %s\n", i, argv[i]);

View File

@@ -58,12 +58,8 @@ static int tcp_server(void)
printf("Client connected!\n");
// 发送数据
const char *message = "Hello from server!";
send(client_socket, message, strlen(message), 0);
// 接收数据
memset(buffer, 0, BUFFER_SIZE);
while (1)
{
ssize_t bytes_received = recv(client_socket, buffer, BUFFER_SIZE - 1, 0);
if (bytes_received < 0)
{
@@ -71,8 +67,11 @@ static int tcp_server(void)
close(server_socket);
exit(EXIT_FAILURE);
}
printf("Received message from client: %s\n", buffer);
#if 0
printf("svr recv: %s, len:%d\n", buffer, bytes_received);
#endif
send(client_socket, buffer, bytes_received, 0);
}
// 关闭套接字
close(client_socket);
@@ -102,7 +101,7 @@ static int tcp_client(void)
// 设置服务器地址
memset(&server_addr, 0, sizeof(server_addr));
server_addr.sin_family = AF_INET;
server_addr.sin_addr.s_addr = inet_addr("192.168.3.10"); // 使用本机IP地址
server_addr.sin_addr.s_addr = inet_addr("127.0.0.1"); // 使用本机IP地址
server_addr.sin_port = htons(PORT);
// 连接到服务器
@@ -115,6 +114,8 @@ static int tcp_client(void)
printf("Connected to server!\n");
while (1)
{
// 发送数据
const char *message = "Hello from client!";
send(server_socket, message, strlen(message), 0);
@@ -128,8 +129,11 @@ static int tcp_client(void)
close(server_socket);
exit(EXIT_FAILURE);
}
printf("Received message from server: %s\n", buffer);
#if 0
printf("client recv: %s, len:%d\n", buffer, bytes_received);
#endif
usleep(1000);
}
// 关闭套接字
close(server_socket);

View File

@@ -1,6 +1,6 @@
#define HEAP_SIZE (128*1024)
#define STACK_SIZE (1024 * 4)
#define STACK_SIZE (1024 * 3)
#if defined(__CC_ARM)
#define HEAP_ATTR SECTION("HEAP") __attribute__((zero_init))

View File

@@ -127,6 +127,6 @@ int main(int argc, char *argv[])
while (1)
{
rpc_loop();
u_sleep_ms(0);
}
}

View File

@@ -333,10 +333,6 @@ int fs_svr_statfs(const char *path, struct statfs *buf)
return -ENOSYS;
}
void fs_svr_loop(void)
{
rpc_loop();
}
static const fs_operations_t ops =
{
.fs_svr_open = fs_svr_open,

View File

@@ -48,6 +48,6 @@ int main(int argc, char *argv[])
ns_register("/display", hd, FILE_NODE);
while (1)
{
fs_svr_loop();
u_sleep_ms(0);
}
}

View File

@@ -360,7 +360,9 @@ error_status emac_layer2_configuration(void)
emac_dma_software_reset_set();
while (emac_dma_software_reset_get() == SET)
;
{
u_sleep_ms(10);
}
emac_control_para_init(&mac_control_para);
@@ -481,7 +483,7 @@ error_status emac_speed_config(emac_auto_negotiation_type nego, emac_duplex_type
return ERROR;
}
u_sleep_ms(10);
} while (!(data & PHY_LINKED_STATUS_BIT) && (timeout < PHY_TIMEOUT));
} while (!(data & PHY_LINKED_STATUS_BIT) && (timeout < 100));
if (timeout == PHY_TIMEOUT)
{
@@ -503,7 +505,7 @@ error_status emac_speed_config(emac_auto_negotiation_type nego, emac_duplex_type
return ERROR;
}
u_sleep_ms(10);
} while (!(data & PHY_NEGO_COMPLETE_BIT) && (timeout < PHY_TIMEOUT));
} while (!(data & PHY_NEGO_COMPLETE_BIT) && (timeout < 100));
if (timeout == PHY_TIMEOUT)
{

View File

@@ -102,6 +102,6 @@ int main(int argc, char *argv[])
meta_reg_svr_obj(&net_drv.svr, BLK_DRV_PROT);
while (1)
{
rpc_loop();
u_sleep_ms(0);
}
}

View File

@@ -367,10 +367,6 @@ int fs_svr_statfs(const char *path, struct statfs *buf)
return -ENOSYS;
}
void fs_svr_loop(void)
{
rpc_loop();
}
static const fs_operations_t ops =
{
.fs_svr_open = fs_svr_open,

View File

@@ -49,6 +49,6 @@ int main(int argc, char *argv[])
ns_register("/i2c2", hd, FILE_NODE);
while (1)
{
fs_svr_loop();
u_sleep_ms(0);
}
}

View File

@@ -373,10 +373,6 @@ int fs_svr_statfs(const char *path, struct statfs *buf)
return -ENOSYS;
}
void fs_svr_loop(void)
{
rpc_loop();
}
static const fs_operations_t ops =
{
.fs_svr_open = fs_svr_open,

View File

@@ -49,6 +49,6 @@ int main(int argc, char *argv[])
ns_register("/pca9555", hd, FILE_NODE);
while (1)
{
fs_svr_loop();
u_sleep_ms(0);
}
}

View File

@@ -410,10 +410,6 @@ int fs_svr_statfs(const char *path, struct statfs *buf)
return -ENOSYS;
}
void fs_svr_loop(void)
{
rpc_loop();
}
static const fs_operations_t ops =
{
.fs_svr_open = fs_svr_open,

View File

@@ -50,6 +50,6 @@ int main(int argc, char *argv[])
ns_register("/pin", hd, FILE_NODE);
while (1)
{
fs_svr_loop();
u_sleep_ms(0);
}
}

View File

@@ -93,6 +93,6 @@ int main(int argc, char *argv[])
meta_reg_svr_obj(&snd_drv.svr, BLK_DRV_PROT);
while (1)
{
rpc_loop();
u_sleep_ms(0);
}
}