Add server-side concurrency support; not yet tested

This commit is contained in:
zhangzheng
2024-01-01 22:57:49 +08:00
parent 0bc4a32d6f
commit 1e8d407664
16 changed files with 280 additions and 146 deletions

View File

@@ -1,8 +1,10 @@
#pragma once
#include "thread.h"
#include "types.h"
#include "ref.h"
struct ipc;
typedef struct ipc ipc_t;
struct thread;
typedef struct thread thread_t;
/**
* @brief ipc object, used for cross-process message passing
*
@@ -15,4 +17,7 @@ typedef struct ipc
slist_head_t wait_bind; //!< threads waiting for a server thread to be bound
ram_limit_t *lim; //!< memory quota
umword_t user_id; //!< data bound by the server side
ref_counter_t ref; //!< reference count
} ipc_t;
int ipc_bind(ipc_t *ipc, obj_handler_t th_hd, umword_t user_id, thread_t *th_kobj);

View File

@@ -131,6 +131,7 @@ typedef struct thread
msg_buf_t msg; //!< per-thread message buffer
slist_head_t wait_send_head; //!< wait-list head: nodes waiting to send data to this thread
thread_t *last_send_th; //!< the thread whose data this thread last received
kobject_t *ipc_kobj; //!< ipc object the sender is placed into (used to route the reply)
umword_t user_id; //!< the received user_id
enum thread_state status; //!< thread state
@@ -187,4 +188,5 @@ void thread_ready(thread_t *th, bool_t is_sche);
void thread_timeout_check(ssize_t tick);
msg_tag_t thread_do_ipc(kobject_t *kobj, entry_frame_t *f, umword_t user_id);
int thread_ipc_call(thread_t *to_th, msg_tag_t in_tag, msg_tag_t *ret_tag, ipc_timeout_t timout, umword_t *ret_user_id);
int thread_ipc_call(thread_t *to_th, msg_tag_t in_tag, msg_tag_t *ret_tag,
ipc_timeout_t timout, umword_t *ret_user_id, bool_t is_call);
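/* is_call: TRUE performs a full call (send, then block until the reply arrives in ret_tag/ret_user_id); FALSE performs a one-way send, in which case ret_tag and ret_user_id may be NULL. */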

View File

@@ -33,6 +33,62 @@ typedef struct ipc_wait_bind_entry
slist_head_t node;
thread_t *th;
} ipc_wait_bind_entry_t;
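/**
* @brief Bind a thread to an ipc object as its receiving (server) thread and wake all threads waiting for the bind.
*
* @param ipc     the ipc object to bind
* @param th_hd   handle of the thread in the current task's object space (ignored when th_kobj is given)
* @param user_id user data stored in the ipc object
* @param th_kobj thread object to bind directly; pass NULL to look the thread up via th_hd
* @return 0 on success, -EACCES/-ENOENT on failure, -ECANCELED if a thread is already bound
*/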
int ipc_bind(ipc_t *ipc, obj_handler_t th_hd, umword_t user_id, thread_t *th_kobj)
{
int ret = -EINVAL;
task_t *cur_task = thread_get_current_task();
/* TODO: make this atomic; the thread being bound is not necessarily the current thread */
if (ipc->svr_th == NULL)
{
mword_t status = spinlock_lock(&cur_task->kobj.lock); //!< lock the current task
if (status < 0)
{
return -EACCES;
}
ref_counter_inc(&cur_task->ref_cn); //!< increment the task's reference count
thread_t *recv_kobj;
if (!th_kobj)
{
recv_kobj = (thread_t *)obj_space_lookup_kobj_cmp_type(&cur_task->obj_space, th_hd, THREAD_TYPE);
}
else
{
recv_kobj = th_kobj;
}
if (!recv_kobj)
{
ret = -ENOENT;
goto end_bind;
}
ref_counter_inc(&recv_kobj->ref); //!< take a reference on the bound thread so it cannot be deleted
ipc->svr_th = recv_kobj;
ipc->user_id = user_id;
ipc_wait_bind_entry_t *pos;
slist_foreach_not_next(pos, &ipc->wait_bind, node) //!< wake up all threads waiting for the bind
{
ipc_wait_bind_entry_t *next = slist_next_entry(pos, &ipc->wait_bind, node);
assert(pos->th->status == THREAD_SUSPEND);
slist_del(&pos->node); //!< remove the entry being woken; "next" was saved above so iteration can continue
thread_ready(pos->th, TRUE);
pos = next;
}
ret = 0;
end_bind:
//!< unlock first, then drop the task's reference count
spinlock_set(&cur_task->kobj.lock, status);
ref_counter_dec_and_release(&cur_task->ref_cn, &cur_task->kobj);
}
else
{
ret = -ECANCELED;
}
return ret;
}
/**
* @brief ipc system call
*
@@ -66,48 +122,8 @@ static void ipc_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t in_tag,
tag = msg_tag_init4(0, 0, 0, -EPROTO);
break;
}
/* TODO: make this atomic; the thread being bound is not necessarily the current thread */
if (ipc->svr_th == NULL)
{
mword_t status = spinlock_lock(&cur_task->kobj.lock); //!< lock the current task
if (status < 0)
{
tag = msg_tag_init4(0, 0, 0, -EACCES);
break;
}
ref_counter_inc(&cur_task->ref_cn); //!< increment the task's reference count
thread_t *recv_kobj = (thread_t *)obj_space_lookup_kobj_cmp_type(&cur_task->obj_space, f->r[0], THREAD_TYPE);
if (!recv_kobj)
{
ret = -ENOENT;
goto end_bind;
}
ref_counter_inc(&recv_kobj->ref); //!< take a reference on the bound thread so it cannot be deleted
ipc->svr_th = recv_kobj;
ipc->user_id = f->r[1];
ipc_wait_bind_entry_t *pos;
slist_foreach_not_next(pos, &ipc->wait_bind, node) //!< wake up all threads waiting for the bind
{
ipc_wait_bind_entry_t *next = slist_next_entry(pos, &ipc->wait_bind, node);
assert(pos->th->status == THREAD_SUSPEND);
slist_del(&next->node);
thread_ready(pos->th, TRUE);
pos = next;
}
ret = 0;
end_bind:
//!< unlock first, then drop the task's reference count
spinlock_set(&cur_task->kobj.lock, status);
ref_counter_dec_and_release(&cur_task->ref_cn, &cur_task->kobj);
tag = msg_tag_init4(0, 0, 0, ret);
}
else
{
tag = msg_tag_init4(0, 0, 0, -ECANCELED);
}
ret = ipc_bind(ipc, f->r[0], f->r[1], NULL);
tag = msg_tag_init4(0, 0, 0, ret);
}
break;
case IPC_DO:
@@ -177,12 +193,21 @@ static void ipc_release_stage2(kobject_t *kobj)
mm_limit_free(ipc->lim, kobj);
printk("ipc release 0x%x\n", kobj);
}
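/* put hook installed on the ipc kobject: drops one reference and tells the caller whether the object can now be released (the exact meaning of ref_counter_dec's return value is assumed here). */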
static bool_t ipc_put(kobject_t *kobj)
{
ipc_t *ipc = container_of(kobj, ipc_t, kobj);
return ref_counter_dec(&ipc->ref) == 1;
}
static void ipc_init(ipc_t *ipc, ram_limit_t *lim)
{
kobject_init(&ipc->kobj, IPC_TYPE);
slist_init(&ipc->wait_bind);
spinlock_init(&ipc->lock);
ref_counter_init(&ipc->ref);
ref_counter_inc(&ipc->ref);
ipc->lim = lim;
ipc->kobj.put_func = ipc_put;
ipc->kobj.invoke_func = ipc_syscall;
ipc->kobj.stage_1_func = ipc_release_stage1;
ipc->kobj.stage_2_func = ipc_release_stage2;

View File

@@ -25,7 +25,7 @@
#include "err.h"
#include "map.h"
#include "access.h"
#include "ipc.h"
enum thread_op
{
SET_EXEC_REGS,
@@ -423,13 +423,13 @@ static int ipc_data_copy(thread_t *dst_th, thread_t *src_th, msg_tag_t tag)
dst_th->msg.tag = tag;
return 0;
}
/**
* @brief Receive data on the current thread
*
* @param ret_msg     receives the tag of the incoming message
* @param timeout     receive timeout
* @param ret_user_id receives the user_id associated with the sender
* @param ipc_kobj    optional ipc object this receive is performed on behalf of (may be NULL)
* @return int
*/
static int thread_ipc_recv(msg_tag_t *ret_msg, ipc_timeout_t timeout, umword_t *ret_user_id)
static int thread_ipc_recv(msg_tag_t *ret_msg, ipc_timeout_t timeout,
umword_t *ret_user_id, ipc_t *ipc_kobj)
{
int ret = 0;
assert(ret_msg);
@@ -458,11 +458,22 @@ static int thread_ipc_recv(msg_tag_t *ret_msg, ipc_timeout_t timeout, umword_t *
//!< add to the wait queue
if (timeout.recv_timeout)
{
thread_wait_entry_init(&wait, cur_th, timeout.recv_timeout);
slist_add_append(&wait_recv_queue, &wait.node); //!< put on the wait queue
}
}
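/* If this receive is done on behalf of an ipc object, remember it on the thread: when a sender arrives, thread_ipc_call() binds that ipc object to the sender so the reply can be routed through it. */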
if (ipc_kobj)
{
/* take a reference on the IPC object */
ref_counter_inc(&ipc_kobj->ref);
cur_th->ipc_kobj = &ipc_kobj->kobj;
}
else
{
cur_th->ipc_kobj = NULL;
}
thread_suspend(cur_th); //!< suspend
preemption(); //!< reschedule
if (cur_th->ipc_status == THREAD_IPC_ABORT)
@@ -533,70 +544,11 @@ static int thread_ipc_reply(msg_tag_t in_tag)
cpulock_set(status);
return ret;
}
static int thread_ipc_send(thread_t *to_th, msg_tag_t in_tag, ipc_timeout_t timout)
int thread_ipc_call(thread_t *to_th, msg_tag_t in_tag, msg_tag_t *ret_tag,
ipc_timeout_t timout, umword_t *ret_user_id, bool_t is_call)
{
int ret = -EINVAL;
thread_t *cur_th = thread_get_current();
thread_t *recv_kobj = to_th;
mword_t lock_stats = spinlock_lock(&cur_th->kobj.lock);
if (lock_stats < 0)
{
//!< the lock is no longer valid
return -EACCES;
}
again_check:
if (recv_kobj->status == THREAD_READY)
{
thread_wait_entry_t wait;
cur_th->ipc_status = THREAD_SEND; //!< suspended because of the send
thread_wait_entry_init(&wait, cur_th, timout.send_timeout);
slist_add_append(&recv_kobj->wait_send_head, &wait.node); //!< put on the target thread's wait queue
slist_add_append(&wait_send_queue, &wait.node_timeout);
thread_suspend(cur_th); //!< suspend
preemption(); //!< reschedule
if (cur_th->ipc_status == THREAD_IPC_ABORT)
{
cur_th->ipc_status = THREAD_NONE;
ret = -ESHUTDOWN;
goto end;
}
else if (cur_th->ipc_status == THREAD_TIMEOUT)
{
ret = -EWTIMEDOUT;
goto end;
}
cur_th->ipc_status = THREAD_NONE;
goto again_check;
}
else if (recv_kobj->status == THREAD_SUSPEND && recv_kobj->ipc_status == THREAD_RECV)
{
// if (slist_in_list(&recv_kobj->wait_node))
// {
// //!< if it is already in the queue, remove it
// slist_del(&recv_kobj->wait_node);
// }
//!< start sending the data
ret = ipc_data_copy(recv_kobj, cur_th, in_tag); //!< copy the data
if (ret < 0)
{
//!< copy failed
goto end;
}
recv_kobj->last_send_th = cur_th; //!< record who last sent to the receiver
ref_counter_inc(&cur_th->ref); //!< take a reference as the sender
thread_ready(recv_kobj, TRUE); //!< wake the receiver directly
preemption(); //!< reschedule
}
ret = 0;
end:
spinlock_set(&cur_th->kobj.lock, lock_stats);
}
int thread_ipc_call(thread_t *to_th, msg_tag_t in_tag, msg_tag_t *ret_tag, ipc_timeout_t timout, umword_t *ret_user_id)
{
assert(ret_tag);
assert(!is_call || ret_tag); //!< a reply buffer is only required when this is a call
int ret = -EINVAL;
thread_t *cur_th = thread_get_current();
thread_t *recv_kobj = to_th;
@@ -635,11 +587,6 @@ again_check:
}
else if (recv_kobj->status == THREAD_SUSPEND && recv_kobj->ipc_status == THREAD_RECV)
{
// if (slist_in_list(&recv_kobj->wait_node))
// {
// //!< if it is already in the queue, remove it
// slist_del(&recv_kobj->wait_node);
// }
//!< start sending the data
ret = ipc_data_copy(recv_kobj, cur_th, in_tag); //!< copy the data
if (ret < 0)
@@ -647,14 +594,29 @@ again_check:
//!< copy failed
goto end;
}
recv_kobj->last_send_th = cur_th; //!< record who last sent to the receiver
ref_counter_inc(&cur_th->ref); //!< take a reference as the sender
thread_ready(recv_kobj, TRUE); //!< wake the receiver directly
ret = thread_ipc_recv(ret_tag, timout, ret_user_id); //!< the current thread receives
if (ret < 0)
if (recv_kobj->ipc_kobj)
{
//!< receive timed out
goto end;
// bind the reply ipc object to the current (sending) thread
int bind_ret = ipc_bind((ipc_t *)(recv_kobj->ipc_kobj), -1, 0, cur_th);
assert(bind_ret >= 0); //!< keep the bind call outside assert so it is not compiled out under NDEBUG
ref_counter_dec_and_release(&((ipc_t *)(recv_kobj->ipc_kobj))->ref,
recv_kobj->ipc_kobj);
recv_kobj->ipc_kobj = NULL;
recv_kobj->last_send_th = NULL;
}
else
{
recv_kobj->last_send_th = cur_th; //!< record who last sent to the receiver
ref_counter_inc(&cur_th->ref); //!< take a reference as the sender
}
thread_ready(recv_kobj, TRUE); //!< wake the receiver directly
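/* For a call (is_call == TRUE) the sender now blocks to receive the reply; for a plain send it returns right after waking the receiver. */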
if (is_call)
{
ret = thread_ipc_recv(ret_tag, timout, ret_user_id, NULL); //!< the current thread now receives the reply
if (ret < 0)
{
//!< receive timed out
goto end;
}
}
preemption(); //!< reschedule
}
@@ -691,7 +653,7 @@ msg_tag_t thread_do_ipc(kobject_t *kobj, entry_frame_t *f, umword_t user_id)
ipc_timeout_t ipc_tm_out = ipc_timeout_create(f->r[3]);
to_th->user_id = user_id;
ret = thread_ipc_call(to_th, in_tag, &recv_tag, ipc_tm_out, &f->r[1]);
ret = thread_ipc_call(to_th, in_tag, &recv_tag, ipc_tm_out, &f->r[1], TRUE);
if (ret < 0)
{
return msg_tag_init4(0, 0, 0, ret);
@@ -710,8 +672,9 @@ msg_tag_t thread_do_ipc(kobject_t *kobj, entry_frame_t *f, umword_t user_id)
{
msg_tag_t ret_msg;
ipc_timeout_t ipc_tm_out = ipc_timeout_create(f->r[3]);
kobject_t *ipc_kobj = obj_space_lookup_kobj_cmp_type(&cur_task->obj_space, f->r[4], IPC_PROT);
int ret = thread_ipc_recv(&ret_msg, ipc_tm_out, &f->r[1]);
int ret = thread_ipc_recv(&ret_msg, ipc_tm_out, &f->r[1], (ipc_t *)ipc_kobj);
if (ret < 0)
{
return msg_tag_init4(0, 0, 0, ret);
@@ -726,7 +689,7 @@ msg_tag_t thread_do_ipc(kobject_t *kobj, entry_frame_t *f, umword_t user_id)
ipc_timeout_t ipc_tm_out = ipc_timeout_create(f->r[3]);
to_th->user_id = user_id;
ret = thread_ipc_send(to_th, in_tag, ipc_tm_out);
ret = thread_ipc_call(to_th, in_tag, NULL, ipc_tm_out, NULL, FALSE);
return msg_tag_init4(0, 0, 0, ret);
}
default:

View File

@@ -64,7 +64,7 @@ static void knl_main(void)
msg->msg_buf[1] = pos->pid;
msg->msg_buf[2] = 0;
int ret = thread_ipc_call(init_thread, msg_tag_init4(0, 3, 0, 0x0005 /*PM_PROT*/),
&tag, ipc_timeout_create2(3000, 3000), &user_id);
&tag, ipc_timeout_create2(3000, 3000), &user_id, TRUE);
if (ret < 0)
{

View File

@@ -19,7 +19,7 @@ typedef struct ipc_msg
{
umword_t msg_buf[IPC_MSG_SIZE / WORD_BYTES];
umword_t map_buf[MAP_BUF_SIZE / WORD_BYTES];
umword_t user[IPC_USER_SIZE / WORD_BYTES];
umword_t user[IPC_USER_SIZE / WORD_BYTES]; // [0] used by pthread, [1] used by drivers, [2] stores the target pid during ipc communication
};
uint8_t data[MSG_BUG_LEN];
};
@@ -58,18 +58,11 @@ msg_tag_t thread_exec_regs(obj_handler_t obj, umword_t pc, umword_t sp, umword_t
msg_tag_t thread_run(obj_handler_t obj, uint8_t prio);
msg_tag_t thread_bind_task(obj_handler_t obj, obj_handler_t tk_obj);
msg_tag_t thread_ipc_wait(ipc_timeout_t timeout, umword_t *ret_obj);
msg_tag_t thread_ipc_wait(ipc_timeout_t timeout, umword_t *obj, obj_handler_t ipc_obj);
msg_tag_t thread_ipc_reply(msg_tag_t in_tag, ipc_timeout_t timeout);
msg_tag_t thread_ipc_send(msg_tag_t in_tag, obj_handler_t target_th_obj, ipc_timeout_t timeout);
msg_tag_t thread_ipc_call(msg_tag_t in_tag, obj_handler_t target_th_obj, ipc_timeout_t timeout);
// #define thread_get_cur_ipc_msg() \
// ( \
// { \
// umword_t buf; \
// thread_msg_buf_get(-1, &buf, NULL); \
// ((ipc_msg_t *)buf); \
// })
static inline ipc_msg_t *thread_get_cur_ipc_msg(void)
{
umword_t buf;

View File

@@ -20,7 +20,7 @@ enum IPC_TYPE
IPC_RECV,
IPC_SEND,
};
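/* thread_ipc_wait now also takes an ipc object handle: the kernel looks it up and, if found, records it so the reply can be routed through that ipc object. Existing call sites that only wait on the thread itself pass -1, for which no ipc object is found and the old behaviour is kept. */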
msg_tag_t thread_ipc_wait(ipc_timeout_t timeout, umword_t *obj)
msg_tag_t thread_ipc_wait(ipc_timeout_t timeout, umword_t *obj, obj_handler_t ipc_obj)
{
register volatile umword_t r0 asm("r0");
register volatile umword_t r1 asm("r1");
@@ -29,7 +29,7 @@ msg_tag_t thread_ipc_wait(ipc_timeout_t timeout, umword_t *obj)
IPC_WAIT,
0,
timeout.raw,
0,
ipc_obj,
0);
asm __volatile__(""
:

View File

@@ -36,9 +36,11 @@ target_link_libraries(
sys
muslc
cpio
util
)
set_target_properties(sys_util PROPERTIES LINK_FLAGS "-pie ")
add_dependencies(sys_util sys)
add_dependencies(sys_util muslc)
add_dependencies(sys_util util)

View File

@@ -6,6 +6,8 @@
#include "u_thread.h"
#include "u_util.h"
#include <assert.h>
// FIXME: each thread should have its own buf_hd
#define RPC_SVR_MAP_OBJ_NR (MAP_BUF_SIZE / sizeof(umword_t))
static obj_handler_t buf_hd[RPC_SVR_MAP_OBJ_NR];
@@ -46,6 +48,15 @@ int rpc_hd_alloc(void)
{
return -1;
}
// if the newly allocated handle is the same as a later entry, that later entry must be cleared
for (int j = i + 1; j < RPC_SVR_MAP_OBJ_NR; j++)
{
if (hd == buf_hd[j])
{
buf_hd[j] = 0;
break;
}
}
}
else
{

View File

@@ -8,9 +8,16 @@
#include "u_err.h"
#include "u_rpc_buf.h"
#include "cons_cli.h"
#include "u_env.h"
#include "u_sleep.h"
#include "u_thread_util.h"
#include "u_slist.h"
#include "cons_cli.h"
#include <pthread.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <malloc.h>
static meta_t meta_obj;
static msg_tag_t rpc_meta_t_dispatch(struct rpc_svr_obj *obj, msg_tag_t in_tag, ipc_msg_t *ipc_msg);
@@ -151,7 +158,7 @@ int rpc_meta_init(obj_handler_t th, obj_handler_t *ret_ipc_hd)
*/
void rpc_loop(void)
{
umword_t obj;
umword_t obj = 0;
msg_tag_t tag;
umword_t buf;
ipc_msg_t *msg;
@@ -162,7 +169,7 @@ void rpc_loop(void)
while (1)
{
rpc_hd_alloc();
tag = thread_ipc_wait(ipc_timeout_create2(0, 0), &obj);
tag = thread_ipc_wait(ipc_timeout_create2(0, 0), &obj, -1);
if (msg_tag_get_val(tag) < 0)
{
continue;
@@ -175,3 +182,128 @@ void rpc_loop(void)
thread_ipc_reply(tag, ipc_timeout_create2(0, 0));
}
}
#define RPC_MTD_TH_STACK_SIZE 1024
typedef struct mtd_params
{
rpc_svr_obj_t *obj;
msg_tag_t in_tag;
obj_handler_t ipc_obj;
obj_handler_t th_obj;
slist_head_t node;
} mtd_params_t;
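/* Per-request parameter block handed to a worker thread. It lives at the end of the malloc()ed block that also holds the worker's stack and message buffer (see rpc_mtd_loop), and is linked into th_head so the block can be reclaimed later. */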
static slist_head_t th_head;
static pthread_spinlock_t lock;
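/* Worker thread entry: runs the dispatch for one request, sends the reply through the per-request ipc object, releases that handle and finally deletes itself. The trailing infinite loop only guards against execution continuing past u_thread_del(). */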
static void rpc_mtc_thread(void *arg)
{
rpc_svr_obj_t *svr_obj;
msg_tag_t tag;
ipc_msg_t *msg = NULL; // FIXME: msg and tag are never properly initialized here; the worker needs the received request message, and tag is only set when a dispatch handler exists
mtd_params_t *params = (mtd_params_t *)arg;
svr_obj = (rpc_svr_obj_t *)params->obj;
if (svr_obj->dispatch)
{
tag = svr_obj->dispatch(svr_obj, params->in_tag, msg);
}
thread_ipc_send(tag, params->ipc_obj, ipc_timeout_create2(0, 0));
handler_free_umap(params->ipc_obj);
params->ipc_obj = HANDLER_INVALID;
u_thread_del(params->th_obj);
while (1)
;
}
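/* Walk th_head and free the malloc()ed stack blocks of previously created workers. Nothing here checks that a worker has actually finished, so this assumes entries are only reclaimed once their thread has exited. */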
static void check_release_stack_mem(void)
{
mtd_params_t *pos;
slist_foreach_not_next(pos, &th_head, node)
{
mtd_params_t *next = slist_next_entry(pos, &th_head, node);
slist_del(&pos->node);
void *stack = (void *)((char *)pos - (RPC_MTD_TH_STACK_SIZE + MSG_BUG_LEN));
free(stack);
pos = next;
}
}
extern void __pthread_new_thread_entry__(void);
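/* Multi-threaded variant of rpc_loop(): for every incoming request it creates a dedicated ipc object for the reply, waits on it, and then spawns a worker thread (stack, message buffer and mtd_params_t carved out of one malloc()ed block) that dispatches the request and sends the reply back through that ipc object. */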
int rpc_mtd_loop(void)
{
umword_t obj = 0;
msg_tag_t tag;
umword_t buf;
obj_handler_t ipc_hd;
slist_init(&th_head);
while (1)
{
rpc_hd_alloc();
check_release_stack_mem();
ipc_hd = handler_alloc();
if (ipc_hd == HANDLER_INVALID)
{
cons_write_str("mtd alloc is fial.\n");
u_sleep_ms(1000);
continue;
}
tag = factory_create_ipc(FACTORY_PROT, vpage_create_raw3(0, 0, ipc_hd));
if (msg_tag_get_val(tag) < 0)
{
cons_write_str("mtd factory ipc fail.\n");
handler_free(ipc_hd);
u_sleep_ms(1000);
continue;
}
tag = thread_ipc_wait(ipc_timeout_create2(0, 0), &obj, ipc_hd);
if (msg_tag_get_val(tag) < 0)
{
continue;
}
again_create:;
obj_handler_t th_obj;
void *stack = malloc(RPC_MTD_TH_STACK_SIZE + MSG_BUG_LEN + sizeof(mtd_params_t));
if (!stack)
{
cons_write_str("mtd no stack mem.\n");
check_release_stack_mem();
u_sleep_ms(1000);
goto again_create;
}
int ret_val;
umword_t *stack_tmp = (umword_t *)stack; // FIXME: decrementing from the base of the block writes below the allocation; the initial frame probably belongs at the top of the stack region
mtd_params_t *params = (mtd_params_t *)((char *)stack + RPC_MTD_TH_STACK_SIZE + MSG_BUG_LEN);
// set up the call parameters on the new thread's initial stack (argument, a reserved word, then the entry function)
*(--stack_tmp) = (umword_t)(params);
*(--stack_tmp) = (umword_t)0; // 保留
*(--stack_tmp) = (umword_t)rpc_mtc_thread;
params->in_tag = tag;
params->ipc_obj = ipc_hd;
params->obj = (rpc_svr_obj_t *)obj;
slist_init(&params->node);
slist_add(&th_head, &params->node);
again_th_create:
ret_val = u_thread_create(&params->th_obj,
stack,
RPC_MTD_TH_STACK_SIZE,
(char *)stack + RPC_MTD_TH_STACK_SIZE,
(void (*)(void))__pthread_new_thread_entry__);
if (ret_val < 0)
{
cons_write_str("mtd no mem.\n");
check_release_stack_mem();
u_sleep_ms(1000);
goto again_th_create;
}
// thread_ipc_reply(tag, ipc_timeout_create2(0, 0));
}
return 0;
}

View File

@@ -40,7 +40,7 @@ static void sig_func(void)
while (1)
{
msg_tag_t tag = thread_ipc_wait(ipc_timeout_create2(0, 0), NULL);
msg_tag_t tag = thread_ipc_wait(ipc_timeout_create2(0, 0), NULL, -1);
if (msg_tag_get_val(tag) < 0)
{
continue;

View File

@@ -12,5 +12,5 @@ static obj_handler_t hd = HANDLER_INVALID;
void u_sleep_ms(size_t ms)
{
thread_ipc_wait(ipc_timeout_create2(0, ms / (1000 / CONFIG_SYS_SCHE_HZ)), NULL);
thread_ipc_wait(ipc_timeout_create2(0, ms / (1000 / CONFIG_SYS_SCHE_HZ)), NULL, -1);
}

View File

@@ -9,7 +9,7 @@
#include <u_thread_util.h>
void u_thread_del(obj_handler_t th_hd)
{
task_unmap(TASK_THIS, vpage_create_raw3(KOBJ_DELETE_RIGHT, 0, th_hd));
handler_free_umap(th_hd);
}
int u_thread_create(obj_handler_t *th_hd, void *stack, umword_t stack_size, void *msg_buf, void (*thread_func)(void))
{

View File

@@ -41,7 +41,7 @@ static void thread_test_func(void)
ipc_bind(ipc_hd, th1_hd, 0);
while (1)
{
thread_ipc_wait(ipc_timeout_create2(0, 0), NULL);
thread_ipc_wait(ipc_timeout_create2(0, 0), NULL, -1);
printf("srv recv:%s", buf);
hard_sleep();
buf[0] = '_';

View File

@@ -76,10 +76,11 @@ static void thread_test_func(void)
{
char *buf;
umword_t len;
thread_msg_buf_get(th1_hd, (umword_t *)(&buf), NULL);
while (1)
{
thread_ipc_wait(ipc_timeout_create2(0, 0), NULL);
thread_ipc_wait(ipc_timeout_create2(0, 0), NULL, -1);
printf("srv recv:%s", buf);
hard_sleep();
// u_sleep_ms(10);

View File

@@ -37,7 +37,7 @@ static void thread_test_func(void)
thread_msg_buf_get(th1_hd, (umword_t *)(&buf), NULL);
ipc_msg = (ipc_msg_t *)buf;
ipc_msg->map_buf[0] = vpage_create_raw3(0, 0, log_hd).raw;
thread_ipc_wait(ipc_timeout_create2(0, 0), NULL);
thread_ipc_wait(ipc_timeout_create2(0, 0), NULL, -1);
printf("srv recv:%s", buf);
ulog_write_str(log_hd, "map test success.\n");
hard_sleep();