Add CPU usage support

zhangzheng
2025-02-05 14:44:49 +08:00
parent 8e2e588298
commit a1184c8e95
22 changed files with 372 additions and 156 deletions

View File

@@ -3,8 +3,13 @@
## TODO list
### high prio
* [x] APPFS: a filesystem executed in place from flash, letting apps run directly.
* [x] Improve MPU page-fault management support.
* [x] Improve MPU page-fault emulation support: red-black tree bookkeeping, handling faults for whole blocks or sub-blocks.
* [x] Improve MPU support in the memory-management object: map automatically on a page fault.
* [x] A new IPC mechanism, tentatively named fastipc: it switches only the memory space rather than the thread context, making the flow simpler and fixing the priority and concurrency problems of the original IPC.
* [ ] Flesh out process management: process-state subscription, inter-process signal delivery.
* [ ] Flesh out the kernel semaphore object (priority-inversion handling, timeout support).
* [ ] Improve FPU support (the current version occasionally hits register-stacking errors).
* [ ] CPU usage statistics support.
### mid prio
* [ ] net server support
* [x] block driver

View File

@@ -1,6 +1,6 @@
message("========use armv7_8.cmake")
set(CMAKE_C_FLAGS "-mcpu=${CONFIG_ARCH} -O0 -g3 -mfloat-abi=${CONFIG_FLOAT_TYPE} -mthumb -DMKRTOS \
set(CMAKE_C_FLAGS "-mcpu=${CONFIG_ARCH} -Ofast -g3 -mfloat-abi=${CONFIG_FLOAT_TYPE} -mthumb -DMKRTOS \
-std=gnu11 -ffunction-sections -fdata-sections -fno-builtin -u=_printf_float \
-nostartfiles -nodefaultlibs -nostdlib -nostdinc \
-fno-stack-protector -Wl,--gc-section -D__ARM_ARCH_7M__ \

View File

@@ -32,7 +32,7 @@ CONFIG_FD_MAP_ROW_NR=16
#
# Sys util config
#
CONFIG_USING_SIG=n
CONFIG_USING_SIG=y
CONFIG_SIG_THREAD_STACK_SIZE=512
CONFIG_SIG_THREAD_PRIO=3
# end of Sys util config

View File

@@ -4,6 +4,7 @@
#include <types.h>
#include <thread.h>
#include <util.h>
#include <atomics.h>
/**
* @brief Scheduling function
*
@@ -23,13 +24,21 @@ sp_info_t *schde_to(void *usp, void *ksp, umword_t sp_type)
assert(next_th->magic == THREAD_MAGIC);
if (sche->sched_reset)
thread_t *cur_th = thread_get_current();
if (sche->sched_reset == 1)
{
thread_t *cur_th = thread_get_current();
cur_th->sp.knl_sp = ksp;
cur_th->sp.user_sp = usp;
cur_th->sp.sp_type = sp_type;
}
else if (sche->sched_reset == 2)
{
cur_th->sp.knl_sp = ksp;
// cur_th->sp.user_sp = usp;
// cur_th->sp.sp_type = sp_type;
}
atomic_inc(&cur_th->time_count);
sche->sched_reset = 1;
return &next_th->sp;
}
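
For orientation, sched_reset appears to select how much of the outgoing thread's context schde_to records before handing over; the fast-IPC paths later in this commit set it to 2 and 3. A reading of the modes as a sketch (the enum and its names are illustrative, not from the source):

/* Assumed meaning of the sched_reset values:
 *   1 — normal switch: record knl_sp, user_sp and sp_type
 *   2 — fast-IPC entry from a kernel thread: record only knl_sp; the
 *       user frame was just built by the IPC path
 *   3 — fast-IPC reply from a kernel thread: record nothing
 * schde_to resets the mode to 1 after every switch. */
enum sched_reset_mode {
    SCHED_RESET_FULL = 1,
    SCHED_RESET_KNL_SP_ONLY = 2,
    SCHED_RESET_NONE = 3,
};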

View File

@@ -56,6 +56,21 @@ void thread_user_pf_set(thread_t *cur_th, void *pc, void *user_sp, void *ram)
cur_th->sp.user_sp = cur_pf;
cur_th->sp.sp_type = 0xfffffffd;
}
void thread_set_user_pf_noset_knl_sp(thread_t *cur_th, void *pc, void *user_sp, void *ram)
{
umword_t usp = ((umword_t)(user_sp) & ~0x7UL);
pf_t *cur_pf = (pf_t *)(usp)-1; // thread_get_pf(cur_th);
cur_pf->pf_s.xpsr = 0x01000000L;
cur_pf->pf_s.lr = (umword_t)NULL; //!< function invoked when the thread exits
cur_pf->pf_s.pc = (umword_t)pc | 0x1;
cur_pf->regs[5] = (umword_t)ram;
// cur_th->sp.knl_sp = ((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE - 8);
cur_th->sp.user_sp = cur_pf;
cur_th->sp.sp_type = 0xfffffffd;
}
void thread_user_pf_restore(thread_t *cur_th, void *user_sp)
{
cur_th->sp.knl_sp = ((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE - 8);
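
For context: (pf_t *)(usp) - 1 reserves space for the Cortex-M hardware exception frame just below the 8-byte-aligned user sp, xpsr = 0x01000000 sets the Thumb bit, and sp_type 0xfffffffd is the EXC_RETURN value for Thread mode on the process stack. A sketch of the stacked layout the code relies on (struct name illustrative; field order per the ARMv7-M architecture):

#include <stdint.h>

/* Basic ARMv7-M exception frame as stacked by hardware; pf_s in the
 * source appears to follow this layout. */
typedef struct {
    uint32_t r0, r1, r2, r3; /* argument/scratch registers */
    uint32_t r12;
    uint32_t lr;   /* return address when the thread body exits */
    uint32_t pc;   /* entry point; bit 0 set for Thumb execution */
    uint32_t xpsr; /* 0x01000000: EPSR Thumb bit */
} hw_exc_frame_t;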

View File

@@ -60,6 +60,21 @@ void thread_user_pf_set(thread_t *cur_th, void *pc, void *user_sp, void *ram)
cur_th->sp.user_sp = cur_pf;
cur_th->sp.sp_type = 0xfffffffd;
}
void thread_set_user_pf_noset_knl_sp(thread_t *cur_th, void *pc, void *user_sp, void *ram)
{
umword_t usp = ((umword_t)(user_sp) & ~0x7UL);
pf_t *cur_pf = (pf_t *)(usp)-1; // thread_get_pf(cur_th);
cur_pf->pf_s.xpsr = 0x01000000L;
cur_pf->pf_s.lr = (umword_t)NULL; //!< function invoked when the thread exits
cur_pf->pf_s.pc = (umword_t)pc | 0x1;
cur_pf->regs[5] = (umword_t)ram;
// cur_th->sp.knl_sp = ((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE - 8);
cur_th->sp.user_sp = cur_pf;
cur_th->sp.sp_type = 0xfffffffd;
}
void thread_user_pf_restore(thread_t *cur_th, void *user_sp)
{
cur_th->sp.knl_sp = ((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE - 8);

View File

@@ -3,6 +3,7 @@
#include "thread.h"
#include "futex.h"
#include "irq.h"
#include "thread_knl.h"
static umword_t sys_tick_cnt;
umword_t sys_tick_cnt_get(void)
@@ -17,8 +18,9 @@ void SysTick_Handler(void)
sys_tick_cnt++;
thread_timeout_check(1);
futex_timeout_times_tick();
#if 0
thread_calc_cpu_usage();
#if 0
extern void uart_check_timeover(irq_entry_t * irq);
uart_check_timeover(irq_get(LOG_INTR_NO));
#endif
#endif
}

View File

@@ -293,3 +293,7 @@ void thread_timeout_check(ssize_t tick);
msg_tag_t thread_do_ipc(kobject_t *kobj, entry_frame_t *f, umword_t user_id);
int thread_ipc_call(thread_t *to_th, msg_tag_t in_tag, msg_tag_t *ret_tag,
ipc_timeout_t timout, umword_t *ret_user_id, bool_t is_call);
bool_t thread_is_knl(thread_t *thread);
msg_tag_t thread_fast_ipc_replay(entry_frame_t *f);
msg_tag_t thread_fast_ipc_call(thread_t *to_th, entry_frame_t *f, umword_t user_id);

View File

@@ -1,5 +1,7 @@
#pragma once
#include <task.h>
void thread_calc_cpu_usage(void);
uint16_t cpu_get_current_usage(void);
bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl);
void knl_init_1(void);

View File

@@ -2,4 +2,5 @@
void thread_knl_pf_set(thread_t *cur_th, void *pc);
void thread_user_pf_set(thread_t *cur_th, void *pc, void *user_sp, void *ram);
void thread_user_pf_restore(thread_t *cur_th, void *user_sp);
void thread_set_user_pf_noset_knl_sp(thread_t *cur_th, void *pc, void *user_sp, void *ram);
void task_knl_init(task_t *knl_tk);

View File

@@ -175,6 +175,11 @@ static int share_mem_free_pmem(share_mem_t *obj)
assert(obj);
switch (obj->mem_type)
{
case SHARE_MEM_CNT_CMA_CNT:
#if IS_ENABLED(CONFIG_MMU)
/*TODO:support CMA mem.*/
return -ENOSYS;
#endif
case SHARE_MEM_CNT_BUDDY_CNT:
#if IS_ENABLED(CONFIG_MMU)
mm_limit_free_buddy(obj->lim, obj->mem, obj->size);
@@ -182,9 +187,6 @@ static int share_mem_free_pmem(share_mem_t *obj)
mm_limit_free_align(obj->lim, obj->mem, obj->size);
#endif
break;
case SHARE_MEM_CNT_CMA_CNT:
/*TODO:support CMA mem.*/
return -ENOSYS;
case SHARE_MEM_CNT_DPD:
{
#if IS_ENABLED(CONFIG_MMU)
@@ -221,6 +223,11 @@ static int share_mem_alloc_pmem(share_mem_t *obj)
}
switch (obj->mem_type)
{
case SHARE_MEM_CNT_CMA_CNT:
#if IS_ENABLED(CONFIG_MMU)
/*TODO:support CMA mem.*/
return -ENOSYS;
#endif
case SHARE_MEM_CNT_BUDDY_CNT:
#if IS_ENABLED(CONFIG_MMU)
obj->mem = mm_limit_alloc_buddy(obj->lim, obj->size);
@@ -256,9 +263,6 @@ static int share_mem_alloc_pmem(share_mem_t *obj)
}
memset(obj->mem, 0, obj->size);
break;
case SHARE_MEM_CNT_CMA_CNT:
/*TODO:support CMA mem.*/
return -ENOSYS;
case SHARE_MEM_CNT_DPD:
{
#if IS_ENABLED(CONFIG_MMU)
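
The CMA case appears to have been moved above the buddy case deliberately: with CONFIG_MMU enabled it still returns -ENOSYS, while with CONFIG_MMU disabled the early return compiles away and the case falls through into the non-MMU alloc/free path. A minimal standalone model of that pattern (all names illustrative):

#include <stdio.h>

#define HAVE_MMU 0 /* stand-in for IS_ENABLED(CONFIG_MMU) */

enum { MEM_BUDDY, MEM_CMA };

static int free_pmem(int mem_type)
{
    switch (mem_type) {
    case MEM_CMA:
#if HAVE_MMU
        return -1; /* CMA unsupported in the MMU build for now */
#endif
        /* fall through: non-MMU builds share the generic path */
    case MEM_BUDDY:
        puts("freed via the generic allocator");
        break;
    }
    return 0;
}

int main(void) { return free_pmem(MEM_CMA); } /* takes the fall-through */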

View File

@@ -26,6 +26,7 @@
#include <globals.h>
#include <task.h>
#include <thread.h>
#include <thread_knl.h>
typedef struct sys
{
@@ -97,6 +98,7 @@ static void sys_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t in_tag,
#else
f->regs[1] = 0;
#endif
f->regs[2] = cpu_get_current_usage();
tag = msg_tag_init4(0, 0, 0, ret);
break;
}

View File

@@ -36,6 +36,9 @@
#if IS_ENABLED(CONFIG_SMP)
#include <ipi.h>
#endif
#if IS_ENABLED(CONFIG_MPU)
#include <mpu.h>
#endif
#define TAG "[thread]"
enum thread_op
@@ -362,9 +365,9 @@ bool_t thread_sched(bool_t is_sche)
assert(th->magic == THREAD_MAGIC);
if (next_sche == &th->sche)
{
atomic_inc(&th->time_count);
//!< the thread did not change, so no switch is needed
cpulock_set(status);
return FALSE;
}
if (is_sche)
@@ -1156,6 +1159,180 @@ end:
#endif
return ret;
}
msg_tag_t thread_fast_ipc_call(thread_t *to_th, entry_frame_t *f, umword_t user_id)
{
task_t *cur_task = thread_get_current_task();
thread_t *cur_th = thread_get_current();
msg_tag_t in_tag = msg_tag_init(f->regs[0]);
int ret;
// 1. Resources that must be switched: r9 (the task base) and the user stack.
// 2. If the caller is killed while inside the target process, the current thread's state must be restored and an error returned.
// 3. For multi-threaded access the server provides one small user stack; the kernel-to-user stretch is a critical section, and the server then allocates and switches to a fresh user stack.
// 4. Nested fastipc calls are problematic: the kernel must maintain a software call stack.
// During a nested call, if the thread dies in another process and it is the current thread, its state must be restored.
task_t *to_task = thread_get_bind_task(to_th);
if (to_task->nofity_point == NULL)
{
printk("task:0x%x, notify point is not set.\n", to_task);
return msg_tag_init4(0, 0, 0, -EIO);
}
_to_unlock:
if (GET_LSB(*to_task->nofity_bitmap, to_task->nofity_bitmap_len) == GET_LSB((~0ULL), to_task->nofity_bitmap_len))
{
thread_sched(TRUE); /*TODO: should block here and be woken on release*/
preemption();
goto _to_unlock;
}
mutex_lock(&to_task->nofity_lock);
if (GET_LSB(*to_task->nofity_bitmap, to_task->nofity_bitmap_len) == GET_LSB((~0ULL), to_task->nofity_bitmap_len))
{
mutex_unlock(&to_task->nofity_lock);
thread_sched(TRUE); /*TODO: should block here and be woken on release*/
preemption();
goto _to_unlock;
}
umword_t cpu_status = cpulock_lock();
ref_counter_inc((&to_task->ref_cn));
//!< The target runs on the current thread's resources, so the current thread's context must be backed up here.
ret = thread_fast_ipc_save(cur_th, to_task, (void *)(to_task->nofity_stack - 4 * 8 /*FIXME: make this a macro*/)); //!< back up the stack and usp
if (ret >= 0)
{
ipc_msg_t *dst_ipc = (void *)to_task->nofity_msg_buf;
ipc_msg_t *src_ipc = (void *)cur_th->msg.msg;
ret = ipc_dat_copy_raw(&to_task->obj_space, &cur_task->obj_space, to_task->lim,
dst_ipc, src_ipc, in_tag, FALSE);
if (ret >= 0)
{
dst_ipc->user[2] = task_pid_get(cur_task); // set the caller's pid
slist_add(&to_task->nofity_theads_head, &cur_th->fast_ipc_node); // track on a list so it can be released when the process exits
pf_s_t *usr_stask_point = (void *)arch_get_user_sp();
if (thread_is_knl(cur_th))
{
// kernel thread: rebuild the whole frame
thread_set_user_pf_noset_knl_sp(cur_th, to_task->nofity_point,
(void *)to_task->nofity_stack, (void *)to_task->mm_space.mm_block);
usr_stask_point->rg0[0] = in_tag.raw;
usr_stask_point->rg0[1] = user_id;
usr_stask_point->rg0[2] = f->regs[2];
usr_stask_point->rg0[3] = f->regs[3];
scheduler_get_current()->sched_reset = 2;
}
else
{
usr_stask_point->r12 = 0x12121212;
usr_stask_point->xpsr = 0x01000000L;
usr_stask_point->lr = (umword_t)NULL; //!< function invoked when the thread exits
usr_stask_point->pc = (umword_t)(to_task->nofity_point) | 0x1;
//! top of the kernel stack
pf_t *cur_pf = ((pf_t *)((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE + 8)) - 1;
// reset the r9 register
cur_pf->regs[5] = (umword_t)(to_task->mm_space.mm_block);
//! pass arguments in registers
f->regs[0] = in_tag.raw;
f->regs[1] = user_id;
f->regs[2] = f->regs[2];
f->regs[3] = f->regs[3];
}
// switch the MPU
mpu_switch_to_task(to_task);
cpulock_set(cpu_status);
if (thread_is_knl(cur_th))
{
// a kernel thread reschedules immediately
arch_to_sche();
preemption();
}
return in_tag;
}
else
{
ref_counter_dec_and_release(&to_task->ref_cn, &to_task->kobj);
mutex_unlock(&to_task->nofity_lock);
}
}
else
{
ref_counter_dec_and_release(&to_task->ref_cn, &to_task->kobj);
mutex_unlock(&to_task->nofity_lock);
}
cpulock_set(cpu_status);
return msg_tag_init4(0, 0, 0, ret);
}
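
The entry gate at the top of thread_fast_ipc_call tests the slot bitmap twice, once locklessly and once under nofity_lock, before committing: if all low nofity_bitmap_len bits are set, the server has no free notify slots, so the caller yields and retries; the reply path frees a slot by clearing the caller's bit. A minimal sketch of the test, assuming GET_LSB(v, n) keeps the low n bits of v:

#include <stdint.h>

/* Assumed semantics of GET_LSB; valid for 0 < n < 64. */
#define GET_LSB(v, n) ((uint64_t)(v) & ((1ULL << (n)) - 1))

/* All low `slots` bits set means every server notify slot is busy. */
static int server_slots_full(uint64_t bitmap, unsigned slots)
{
    return GET_LSB(bitmap, slots) == GET_LSB(~0ULL, slots);
}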
msg_tag_t thread_fast_ipc_replay(entry_frame_t *f)
{
task_t *cur_task = thread_get_current_task();
thread_t *cur_th = thread_get_current();
msg_tag_t in_tag = msg_tag_init(f->regs[0]);
task_t *old_task = thread_get_bind_task(cur_th);
int ret;
*(cur_task->nofity_bitmap) &= ~(1 << MIN(f->regs[2], cur_task->nofity_bitmap_len)); //!< release the bitmap slot
slist_del(&cur_th->fast_ipc_node); // unlink from the list
ret = thread_fast_ipc_restore(cur_th); // restore the stack and usp
if (ret < 0)
{
mutex_unlock(&old_task->nofity_lock);
return msg_tag_init4(0, 0, 0, ret);
}
umword_t cpu_status = cpulock_lock();
cur_task = thread_get_current_task();
ipc_msg_t *dst_ipc = (void *)cur_th->msg.msg;
ipc_msg_t *src_ipc = (void *)old_task->nofity_msg_buf;
ret = ipc_dat_copy_raw(&cur_task->obj_space, &old_task->obj_space, cur_task->lim,
dst_ipc, src_ipc, in_tag, TRUE); // copy the payload
// if (ret >=0 ) {
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++)
{
if (i < ret)
{
src_ipc->map_buf[i] = old_task->nofity_map_buf[i];
old_task->nofity_map_buf[i] = 0;
}
else
{
src_ipc->map_buf[i] = old_task->nofity_map_buf[i];
}
}
// }
mutex_unlock(&old_task->nofity_lock);
if (thread_is_knl(cur_th))
{
//! leave room for r4-r11
// memcpy((char *)cur_th->sp.knl_sp - 4 * 8 /*FIXME:*/, (char *)cur_th->sp.knl_sp, 4 * 8);
// memset(cur_th->sp.knl_sp, 0, 4 * 8);
// cur_th->sp.knl_sp = (char *)cur_th->sp.knl_sp - 4 * 8;
cur_th->sp.user_sp = 0x0;
cur_th->sp.sp_type = 0xfffffff9;
scheduler_get_current()->sched_reset = 3;
}
else
{
pf_t *cur_pf = ((pf_t *)((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE + 8)) - 1;
cur_pf->regs[5] = (umword_t)(cur_task->mm_space.mm_block); // update the r9 register
}
mpu_switch_to_task(cur_task); // switch the MPU
ref_counter_dec_and_release(&old_task->ref_cn, &old_task->kobj);
if (ret < 0)
{
in_tag = msg_tag_init4(0, 0, 0, ret);
}
cpulock_set(cpu_status);
if (thread_is_knl(cur_th))
{
arch_to_sche();
preemption();
}
return in_tag;
}
/**
* @brief Perform IPC
*
@@ -1178,134 +1355,12 @@ msg_tag_t thread_do_ipc(kobject_t *kobj, entry_frame_t *f, umword_t user_id)
{
case IPC_FAST_REPLAY:
{
msg_tag_t in_tag = msg_tag_init(f->regs[0]);
task_t *old_task = thread_get_bind_task(cur_th);
*(cur_task->nofity_bitmap) &= ~(1 << MIN(f->regs[2], cur_task->nofity_bitmap_len)); //!< release the bitmap slot
slist_del(&cur_th->fast_ipc_node); // unlink from the list
ret = thread_fast_ipc_restore(cur_th); // restore the stack and usp
if (ret < 0)
{
mutex_unlock(&old_task->nofity_lock);
return msg_tag_init4(0, 0, 0, ret);
}
umword_t cpu_status = cpulock_lock();
task_t *cur_task = thread_get_current_task();
ipc_msg_t *dst_ipc = (void *)cur_th->msg.msg;
ipc_msg_t *src_ipc = (void *)old_task->nofity_msg_buf;
ret = ipc_dat_copy_raw(&cur_task->obj_space, &old_task->obj_space, cur_task->lim,
dst_ipc, src_ipc, in_tag, TRUE); // copy the payload
// if (ret >=0 ) {
for (int i = 0; i < CONFIG_THREAD_MAP_BUF_LEN; i++)
{
if (i < ret)
{
src_ipc->map_buf[i] = old_task->nofity_map_buf[i];
old_task->nofity_map_buf[i] = 0;
}
else
{
src_ipc->map_buf[i] = old_task->nofity_map_buf[i];
}
}
// }
mutex_unlock(&old_task->nofity_lock);
pf_t *cur_pf = ((pf_t *)((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE + 8)) - 1;
cur_pf->regs[5] = (umword_t)(cur_task->mm_space.mm_block); // update the r9 register
extern void mpu_switch_to_task(struct task * tk);
mpu_switch_to_task(cur_task); // switch the MPU
ref_counter_dec_and_release(&old_task->ref_cn, &old_task->kobj);
if (ret < 0)
{
in_tag = msg_tag_init4(0, 0, 0, ret);
}
cpulock_set(cpu_status);
return in_tag;
return thread_fast_ipc_replay(f);
}
break;
case IPC_FAST_CALL:
{
msg_tag_t in_tag = msg_tag_init(f->regs[0]);
// 1. Resources that must be switched: r9 (the task base) and the user stack.
// 2. If the caller is killed while inside the target process, the current thread's state must be restored and an error returned.
// 3. For multi-threaded access the server provides one small user stack; the kernel-to-user stretch is a critical section, and the server then allocates and switches to a fresh user stack.
// 4. Nested fastipc calls are problematic: the kernel must maintain a software call stack.
// During a nested call, if the thread dies in another process and it is the current thread, its state must be restored.
task_t *to_task = thread_get_bind_task(to_th);
if (to_task->nofity_point == NULL)
{
printk("task:0x%x, notify point is not set.\n", to_task);
return msg_tag_init4(0, 0, 0, -EIO);
}
_to_unlock:
if (GET_LSB(*to_task->nofity_bitmap, to_task->nofity_bitmap_len) == GET_LSB((~0ULL), to_task->nofity_bitmap_len))
{
thread_sched(TRUE); /*TODO: should block here and be woken on release*/
preemption();
goto _to_unlock;
}
mutex_lock(&to_task->nofity_lock);
if (GET_LSB(*to_task->nofity_bitmap, to_task->nofity_bitmap_len) == GET_LSB((~0ULL), to_task->nofity_bitmap_len))
{
mutex_unlock(&to_task->nofity_lock);
thread_sched(TRUE); /*TODO: should block here and be woken on release*/
preemption();
goto _to_unlock;
}
umword_t cpu_status = cpulock_lock();
ref_counter_inc((&to_task->ref_cn));
//!< The target runs on the current thread's resources, so the current thread's context must be backed up here.
ret = thread_fast_ipc_save(cur_th, to_task, (void *)(to_task->nofity_stack - 4 * 8)); //!< back up the stack and usp
if (ret >= 0)
{
ipc_msg_t *dst_ipc = (void *)to_task->nofity_msg_buf;
ipc_msg_t *src_ipc = (void *)cur_th->msg.msg;
ret = ipc_dat_copy_raw(&to_task->obj_space, &cur_task->obj_space, to_task->lim,
dst_ipc, src_ipc, in_tag, FALSE);
if (ret >= 0)
{
dst_ipc->user[2] = task_pid_get(cur_task); // set the caller's pid
slist_add(&to_task->nofity_theads_head, &cur_th->fast_ipc_node);
pf_s_t *usr_stask_point = (void *)arch_get_user_sp();
usr_stask_point->r12 = 0x12121212;
usr_stask_point->xpsr = 0x01000000L;
usr_stask_point->lr = (umword_t)NULL; //!< function invoked when the thread exits
usr_stask_point->pc = (umword_t)(to_task->nofity_point) | 0x1;
pf_t *cur_pf = ((pf_t *)((char *)cur_th + CONFIG_THREAD_BLOCK_SIZE + 8)) - 1; //!< top of the kernel stack
cur_pf->regs[5] = (umword_t)(to_task->mm_space.mm_block); // reset the r9 register
//! pass arguments in registers
f->regs[0] = in_tag.raw;
f->regs[1] = user_id;
f->regs[2] = f->regs[2];
f->regs[3] = f->regs[3];
extern void mpu_switch_to_task(struct task * tk);
mpu_switch_to_task(to_task);
return in_tag;
}
else
{
ref_counter_dec_and_release(&to_task->ref_cn, &to_task->kobj);
mutex_unlock(&to_task->nofity_lock);
}
}
else
{
ref_counter_dec_and_release(&to_task->ref_cn, &to_task->kobj);
mutex_unlock(&to_task->nofity_lock);
}
cpulock_set(cpu_status);
return msg_tag_init4(0, 0, 0, ret);
return thread_fast_ipc_call(to_th, f, user_id);
}
break;
case IPC_CALL:

View File

@@ -23,6 +23,7 @@
#include "thread.h"
#include "thread_task_arch.h"
#include "types.h"
#include "prot.h"
#include <assert.h>
#if IS_ENABLED(CONFIG_KNL_TEST)
#include <knl_test.h>
@@ -39,13 +40,15 @@
#include <appfs_tiny.h>
#endif
#include <boot_info.h>
#include "pre_cpu.h"
static uint8_t knl_msg_buf[CONFIG_CPU][THREAD_MSG_BUG_LEN];
static task_t knl_task;
static thread_t *init_thread;
static task_t *init_task;
static thread_t *knl_thread[CONFIG_CPU];
static slist_head_t del_task_head;
static umword_t cpu_usage[CONFIG_CPU];
static spinlock_t del_lock;
static void knl_main(void)
@@ -69,6 +72,7 @@ static void knl_main(void)
spinlock_set(&del_lock, status2);
continue;
}
// dead tasks are deleted here
slist_foreach_not_next(pos, &del_task_head, del_node)
{
task_t *next = slist_next_entry(pos, &del_task_head, del_node);
@@ -86,6 +90,7 @@ static void knl_main(void)
if (thread_get_ipc_state(init_thread) != THREAD_IPC_ABORT)
{
#if 0
int ret = thread_ipc_call(init_thread, msg_tag_init4(0, 3, 0, 0x0005 /*PM_PROT*/),
&tag, ipc_timeout_create2(3000, 3000), &user_id, TRUE);
@@ -93,6 +98,20 @@ static void knl_main(void)
{
printk("%s:%d ret:%d\n", __func__, __LINE__, ret);
}
#endif
#define PM_PROT 0x0005
#define MAGIC_NS_USERPID 0xbabababa
entry_frame_t f;
f.regs[0] = msg_tag_init4(0, 3, 0, PM_PROT).raw;
f.regs[1] = 0;
f.regs[2] = 0x2222; /* two parameters are passed; they are unused for now */
f.regs[3] = 0x3333;
tag = thread_fast_ipc_call(init_thread, &f, MAGIC_NS_USERPID);
if (msg_tag_get_val(tag) < 0)
{
printk("init thread comm failed, ret:%d\n", __func__, __LINE__, msg_tag_get_val(tag));
}
}
}
}
@@ -102,30 +121,68 @@ static void knl_main(void)
spinlock_set(&del_lock, status2);
}
}
static inline uint32_t thread_knl_get_current_run_nr(void)
{
if (knl_thread[arch_get_current_cpu_id()] == NULL)
{
return 0;
}
return atomic_read(&knl_thread[arch_get_current_cpu_id()]->time_count);
}
static uint32_t cpu_usage_last_tick_val[CONFIG_CPU];
/**
* Compute the CPU usage.
*/
void thread_calc_cpu_usage(void)
{
uint8_t cur_cpu_id = arch_get_current_cpu_id();
if (sys_tick_cnt_get() % 1000 == 0)
{
cpu_usage[cur_cpu_id] = 1000 - ((thread_knl_get_current_run_nr() - cpu_usage_last_tick_val[cur_cpu_id]));
cpu_usage_last_tick_val[cur_cpu_id] = thread_knl_get_current_run_nr();
// printk("%d\n", cpu_usage[arch_get_current_cpu_id()]);
}
}
uint16_t cpu_get_current_usage(void)
{
return (uint16_t)cpu_usage[arch_get_current_cpu_id()];
}
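
The arithmetic behind the sampling: the scheduler bumps the kernel (idle) thread's time_count on each tick it owns the CPU, so over a 1000-tick window the delta approximates idle ticks, and 1000 minus that delta is busy ticks. Assuming a 1 kHz SysTick, the result is the last second's load in permille, which shell_sys_info later divides by 10.0 to print a percentage. A self-contained model of the sampling (the 1 kHz tick and all names here are assumptions):

#include <stdint.h>
#include <stdio.h>

static uint32_t idle_ticks;     /* stands in for the kernel thread's time_count */
static uint32_t last_sample;    /* idle_ticks at the previous window edge */
static uint32_t usage_permille; /* 0..1000, what cpu_get_current_usage() returns */

static void on_tick(uint32_t tick, int cpu_was_idle)
{
    if (cpu_was_idle)
        idle_ticks++;
    if (tick % 1000 == 0) { /* window edge: busy = window - idle */
        usage_permille = 1000 - (idle_ticks - last_sample);
        last_sample = idle_ticks;
    }
}

int main(void)
{
    /* Simulate one second in which the CPU idles every fourth tick. */
    for (uint32_t t = 1; t <= 1000; t++)
        on_tick(t, t % 4 == 0);
    /* 250 idle ticks -> 750 permille -> prints "cpu usage: 75.0%" */
    printf("cpu usage: %u.%u%%\n", usage_permille / 10, usage_permille % 10);
    return 0;
}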
/**
* Initialize the kernel thread
* Initialize the kernel task
*/
void knl_init_1(void)
{
thread_t *knl_thread;
thread_t *knl_th;
knl_thread = thread_get_current();
thread_init(knl_thread, &root_factory_get()->limit, FALSE);
knl_thread[arch_get_current_cpu_id()] = thread_get_current();
knl_th = knl_thread[arch_get_current_cpu_id()];
thread_init(knl_th, &root_factory_get()->limit, FALSE);
task_init(&knl_task, &root_factory_get()->limit, TRUE);
task_knl_init(&knl_task);
kobject_set_name(&knl_task.kobj, "tk_knl");
thread_knl_pf_set(knl_thread, knl_main);
thread_bind(knl_thread, &knl_task.kobj);
kobject_set_name(&knl_thread->kobj, "th_knl");
thread_set_msg_buf(knl_thread, knl_msg_buf[arch_get_current_cpu_id()],
thread_knl_pf_set(knl_th, knl_main);
thread_bind(knl_th, &knl_task.kobj);
kobject_set_name(&knl_th->kobj, "th_knl");
thread_set_msg_buf(knl_th, knl_msg_buf[arch_get_current_cpu_id()],
knl_msg_buf[arch_get_current_cpu_id()]);
knl_thread->cpu = arch_get_current_cpu_id();
thread_ready(knl_thread, FALSE);
knl_th->cpu = arch_get_current_cpu_id();
thread_ready(knl_th, FALSE);
}
INIT_STAGE1(knl_init_1);
/**
* Whether the given thread is the kernel thread
*/
bool_t thread_is_knl(thread_t *thread)
{
if (thread == knl_thread[arch_get_current_cpu_id()])
return TRUE;
else
return FALSE;
}
/**
* Initialize the init thread
* Initialize the user-mode task
@@ -233,7 +290,7 @@ bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl)
// r9 = (umword_t)(thread_get_bind_task(kill_thread)->mm_space.mm_block);
mpu_switch_to_task(thread_get_bind_task(kill_thread));
ref_counter_dec_and_release(&task->ref_cn, &task->kobj);
reset_ram =TRUE;
reset_ram = TRUE;
}
}
else

View File

@@ -147,4 +147,15 @@ int shell_mem_info(int argc, char *argv[])
printf("sys mem:\ntotal:%dB\nfree:%dB\n", total, free);
return 0;
}
SHELL_EXPORT_CMD(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN), free, shell_mem_info, free command);
SHELL_EXPORT_CMD(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN), free, shell_mem_info, free command);
int shell_sys_info(int argc, char *argv[])
{
size_t total;
size_t free;
printf("sys:\n");
printf("\tcpu usage:%2.1f\n", sys_read_cpu_usage() / 10.0f);
return 0;
}
SHELL_EXPORT_CMD(SHELL_CMD_PERMISSION(0) | SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN), sys, shell_sys_info, sys command);

View File

@@ -55,8 +55,8 @@ add_custom_target(
c_dump ALL
COMMAND
${CMAKE_OBJCOPY} -O binary -S libc.so libc.bin
COMMAND
${CMAKE_COMMAND} -E copy libc.bin ${CMAKE_SOURCE_DIR}/build/output/cpio/libc
# COMMAND
# ${CMAKE_COMMAND} -E copy libc.bin ${CMAKE_SOURCE_DIR}/build/output/cpio/libc
# COMMAND
# cp libc.so ${CMAKE_SOURCE_DIR}/build/output/libc.so
# COMMAND

View File

@@ -12,6 +12,7 @@ typedef struct sys_info
typedef struct sys_info2
{
umword_t resv_dtbo;
umword_t cpu_usage_cur; //!< current CPU usage, in permille
} sys_info2_t;
#define SYS_FLAGS_MAP_CPIO_FS 0x01
@@ -43,3 +44,9 @@ static inline umword_t sys_read_dtbo(void)
sys_read_info2(SYS_PROT, &info, 0);
return info.resv_dtbo;
}
static inline umword_t sys_read_cpu_usage(void)
{
sys_info2_t info;
sys_read_info2(SYS_PROT, &info, 0);
return info.cpu_usage_cur;
}
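
A minimal caller-side sketch; the header name is an assumption, and the value is the kernel's permille figure carried back in r2, so dividing by 10 yields percent, exactly as the shell's sys command does:

#include <stdio.h>
#include <sys_info.h> /* assumed header declaring sys_read_cpu_usage() */

int main(void)
{
    unsigned long u = sys_read_cpu_usage(); /* 0..1000, permille */
    printf("cpu usage: %lu.%lu%%\n", u / 10, u % 10);
    return 0;
}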

View File

@@ -64,6 +64,7 @@ msg_tag_t sys_read_info2(obj_handler_t obj, sys_info2_t *info, umword_t flags)
if (info)
{
info->resv_dtbo = r1;
info->cpu_usage_cur = r2;
}
return msg_tag_init(r0);

View File

@@ -32,8 +32,11 @@ typedef struct meta
int rpc_meta_init(obj_handler_t th, obj_handler_t *ret_ipc_hd);
void meta_unreg_svr_obj_raw(meta_t *meta, umword_t prot);
void meta_unreg_svr_obj(umword_t prot);
rpc_svr_obj_t *meta_find_svr_obj(umword_t prot);
int meta_reg_svr_obj(rpc_svr_obj_t *svr_obj, umword_t prot);
int meta_reg_svr_obj_raw(meta_t *meta, rpc_svr_obj_t *svr_obj, umword_t prot);
int rpc_creaite_bind_ipc(obj_handler_t th, void *obj, obj_handler_t *ipc_hd);
void rpc_loop(void);
#if 0
int rpc_mtd_loop(void);
#endif

View File

@@ -140,7 +140,7 @@ int app_load(const char *name, uenv_t *cur_env, pid_t *pid,
#endif
int type;
umword_t addr;
int ret;
int ret = 0;
#if IS_ENABLED(CONFIG_CPIO_SUPPORT)
ret = cpio_find_file((umword_t)sys_info.bootfs_start_addr, (umword_t)(-1), name, NULL, &type, &addr);

View File

@@ -6,9 +6,12 @@
#include <u_rpc_svr.h>
#include <u_rpc_buf.h>
#include <u_hd_man.h>
#include <rpc_prot.h>
#include <string.h>
#include <errno.h>
#define MAGIC_NS_USERPID 0xbabababa
int fast_ipc_setsp(int i, void *stack);
#define FAST_IPC_MAIN_STACK_SIZE 512
@@ -57,6 +60,16 @@ static msg_tag_t process_ipc(int j, umword_t obj, long tag)
ret_tag = msg_tag_init4(0, 0, 0, -EACCES);
goto end;
}
if (svr_obj == (void *)MAGIC_NS_USERPID)
{
/* magic NS user id: resolve the name-server object */
svr_obj = meta_find_svr_obj(NS_PROT);
}
if (svr_obj == NULL)
{
ret_tag = msg_tag_init4(0, 0, 0, -EACCES);
goto end;
}
if (svr_obj->dispatch)
{
ret_tag = svr_obj->dispatch(svr_obj, msg_tag_init(tag), msg);
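
This mirrors the kernel side: knl_main's bootstrap fast-IPC call passes MAGIC_NS_USERPID (0xbabababa) as the object id, and the runtime resolves that magic value to the registered name-server object through the meta_find_svr_obj helper added in this commit. A minimal model of the resolution step (uintptr_t stands in for umword_t; NS_PROT's value is illustrative):

#include <stdint.h>

typedef struct rpc_svr_obj rpc_svr_obj_t;         /* opaque here */
rpc_svr_obj_t *meta_find_svr_obj(uintptr_t prot); /* new in this commit */

#define MAGIC_NS_USERPID 0xbabababaUL
#define NS_PROT 0x0002 /* illustrative protocol number */

/* Magic bootstrap id -> name-server object; any other value is already
 * a server-object pointer taken from the message. */
static rpc_svr_obj_t *resolve_svr_obj(uintptr_t obj)
{
    if (obj == MAGIC_NS_USERPID)
        return meta_find_svr_obj(NS_PROT);
    return (rpc_svr_obj_t *)obj;
}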

View File

@@ -48,6 +48,10 @@ static rpc_svr_obj_t *meta_svr_find(meta_t *meta, umword_t prot)
pthread_spin_unlock(&meta->lock);
return NULL;
}
rpc_svr_obj_t *meta_find_svr_obj(umword_t prot)
{
return meta_svr_find(&meta_obj, prot);
}
void meta_unreg_svr_obj_raw(meta_t *meta, umword_t prot)
{
pthread_spin_lock(&meta->lock);
@@ -82,6 +86,7 @@ int meta_reg_svr_obj_raw(meta_t *meta, rpc_svr_obj_t *svr_obj, umword_t prot)
pthread_spin_unlock(&meta->lock);
return i < META_PROT_NR;
}
int meta_reg_svr_obj(rpc_svr_obj_t *svr_obj, umword_t prot)
{
return meta_reg_svr_obj_raw(&meta_obj, svr_obj, prot);
@@ -185,14 +190,18 @@ void rpc_loop(void)
continue;
}
svr_obj = (rpc_svr_obj_t *)obj;
if (svr_obj->dispatch)
if (svr_obj != NULL && svr_obj->dispatch)
{
tag = svr_obj->dispatch(svr_obj, tag, msg);
}
else
{
tag = msg_tag_init4(0, 0, 0, -ENOSYS);
}
thread_ipc_reply(tag, ipc_timeout_create2(0, 0));
}
}
#if 0
#define RPC_MTD_TH_STACK_SIZE (1024 + 256)
typedef struct mtd_params
{
@@ -342,3 +351,4 @@ int rpc_mtd_loop(void)
}
return 0;
}
#endif