修复引用计数问题导致的内核挂掉问题

This commit is contained in:
zhangzheng
2025-03-17 14:48:28 +08:00
parent 3ecafaed24
commit 58adc1d0b7
22 changed files with 300 additions and 120 deletions

View File

@@ -14,19 +14,20 @@
* [x] 去除原来的ipc机制使用fastipc机制并单独实现sleep接口目前的ipc有概率卡死问题
* [x] TTY驱动支持
* [x] 内核二值信号量支持优先级反转(优先级继承协议)。
* [x] 删除之前用于log的sem
* [x] 集成cmbacktrace
* [x] 新进程中env支持
* [ ] FPU完整支持（含fastipc的FPU支持）
* [ ] 文件系统 & 网络协议栈完善自动删除支持(文件描述符自动管理库)
* [ ] 线程占用率统计
* [ ] procfs支持
* [ ] 新进程中env支持
* [ ] dup, dup2等接口支持
* [ ] posix mq支持
* [ ] posix sig支持
* [ ] posix shm支持
* [ ] posix sema支持
* [ ] vfork + exec实现
* [ ] 几大组件稳定性测试
* [x] 删除之前用于log的sem
* [ ] 内核代码review
### mid prio
* [x] net server support
* [x] block driver
@@ -38,6 +39,7 @@
* [x] snd driver
* [x] ymodem support
* [x] vi support
* [ ] vfork + exec实现
### low prio
- [ ] toybox support
- [ ] ota support

View File

@@ -270,7 +270,7 @@ static const char *get_cur_thread_name(void)
TX_THREAD_GET_CURRENT(ptThread);
return ptThread->tx_thread_name;
#elif (CMB_OS_PLATFORM_TYPE == CMB_OS_PLATFORM_MKRTOS)
return kobject_get_name(&(thread_get_current()->kobj));
return kobject_get_name(&(thread_get_current_task()->kobj));
#endif
}

View File

@@ -158,6 +158,7 @@ typedef struct thread
enum thread_state status; //!< 线程状态
enum thread_ipc_state ipc_status; //!< ipc状态
slist_head_t release_node;
thread_fast_ipc_com_t *com; //!< fast ipc通信这里用指针是为了减少thread block大小

View File

@@ -4,4 +4,5 @@
void thread_calc_cpu_usage(void);
uint16_t cpu_get_current_usage(void);
bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl);
void thread_knl_release_helper(thread_t *th);
void knl_init_1(void);

View File

@@ -158,7 +158,7 @@ void irq_sender_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t in_tag,
{
ref_counter_inc(&th->ref);
int ret = irq_sender_wait(irq, th, f->regs[1]);
ref_counter_dec_and_release(&th->ref, &irq->kobj); //! 引用计数+1
ref_counter_dec_and_release(&th->ref, &th->kobj); //! 引用计数+1
tag = msg_tag_init4(0, 0, 0, ret);
}
break;

View File

@@ -56,7 +56,7 @@ void printk(const char *fmt, ...)
thread_t *cut_th = thread_get_current();
state = spinlock_lock(&lock);
xsprintf(print_cache, "[%8d]%s: ",
xsprintf(print_cache, "[%8d][%8s]: ",
pre_cpu_is_init() ? sys_tick_cnt_get() : 0,
kobject_get_name(&cut_th->kobj));
print_raw(print_cache);

View File

@@ -68,8 +68,10 @@ again:
if (thread_get_status(first_wait->thread) == THREAD_SUSPEND)
{
slist_del(first_wait_node);
thread_sleep_del_and_wakeup(first_wait->thread);
ref_counter_dec_and_release(&first_wait->thread->ref, &first_wait->thread->kobj);
if (ref_counter_dec_and_release(&first_wait->thread->ref, &first_wait->thread->kobj) != 1)
{
thread_sleep_del_and_wakeup(first_wait->thread);
}
if (obj->cnt < obj->max_cnt)
{
obj->cnt++;
@@ -220,7 +222,10 @@ static void sema_release_stage1(kobject_t *kobj)
sema_wait_item_t *next = slist_next_entry(wait_item, &obj->suspend_head, node);
slist_del(&wait_item->node);
thread_sleep_del_and_wakeup(wait_item->thread);
if (ref_counter_dec_and_release(&wait_item->thread->ref, &wait_item->thread->kobj) != 1)
{
thread_sleep_del_and_wakeup(wait_item->thread);
}
if (obj->cnt < obj->max_cnt)
{
obj->cnt++;

View File

@@ -57,7 +57,10 @@ void thread_check_timeout(void)
{
assert(pos->th->status == THREAD_SUSPEND);
slist_del(&pos->node);
thread_ready(pos->th, TRUE);
if (thread_get_ipc_state(pos->th) != THREAD_IPC_ABORT)
{
thread_ready(pos->th, TRUE);
}
}
} else {
// if (pos->times_debug >= 3000)

View File

@@ -34,6 +34,7 @@
#include "types.h"
#include "sema.h"
#include "sleep.h"
#include "thread_knl.h"
#if IS_ENABLED(CONFIG_SMP)
#include <ipi.h>
#endif
@@ -127,6 +128,7 @@ void thread_init(thread_t *th, ram_limit_t *lim, umword_t flags)
kobject_init(&th->kobj, THREAD_TYPE);
sched_init(&th->sche);
slist_init(&th->futex_node);
slist_init(&th->release_node);
#if 0
slist_init(&th->wait_send_head);
spinlock_init(&th->recv_lock);
@@ -330,7 +332,7 @@ void thread_unbind(thread_t *th)
{
task_t *tsk = container_of(th->task, task_t, kobj);
ref_counter_dec_and_release(&tsk->ref_cn, &th->kobj);
ref_counter_dec_and_release(&tsk->ref_cn, &tsk->kobj);
th->task = NULL;
}
}
@@ -1383,6 +1385,7 @@ end:
cpu_status = cpulock_lock();
thread_unbind(cur_th);
thread_suspend(cur_th);
thread_knl_release_helper(cur_th);
cpulock_set(cpu_status);
} else {
ref_counter_dec_and_release(&cur_th->ref, &cur_th->kobj);

View File

@@ -48,69 +48,102 @@ static task_t knl_task;
static thread_t *init_thread;
static task_t *init_task;
static thread_t *knl_thread[CONFIG_CPU];
static slist_head_t del_task_head;
static slist_head_t del_task_head; //!<链表中是需要被删除的进程
static slist_head_t del_thread_head;//!<链表中是需要被释放内存的线程
static umword_t cpu_usage[CONFIG_CPU];
static spinlock_t del_lock;
static umword_t cpu_usage_last_tick_val[CONFIG_CPU];
/*
 * Drain the deferred thread-release list.
 *
 * Threads that terminated themselves are queued on del_thread_head by
 * thread_knl_release_helper(); the kernel main loop calls this function
 * to drop the final reference so the thread memory can be reclaimed.
 *
 * Fix: the node must be unlinked BEFORE the reference is dropped —
 * ref_counter_dec_and_release() may free the thread, after which
 * touching pos->release_node is a use-after-free.
 */
static void knl_release_thread(void)
{
    thread_t *pos;
    umword_t lock_status;

    lock_status = spinlock_lock(&del_lock);
    if (slist_is_empty(&del_thread_head))
    {
        spinlock_set(&del_lock, lock_status);
        return;
    }
    slist_foreach_not_next(pos, &del_thread_head, release_node)
    {
        /* Grab the successor before the current node is unlinked/freed. */
        thread_t *next = slist_next_entry(pos, &del_thread_head, release_node);

        /* Unlink first, then drop the deferred reference (may free pos). */
        slist_del(&pos->release_node);
        ref_counter_dec_and_release(&pos->ref, &pos->kobj);
        pos = next;
    }
    spinlock_set(&del_lock, lock_status);
}
/*
 * Process the deferred task-deletion list.
 *
 * For every task queued on del_task_head (see task_knl_kill()), notify
 * the init/pm task via fast IPC that the pid is being killed, then tear
 * the task down with task_kill().
 */
static void knl_del_task(void)
{
    task_t *pos;
    umword_t lock_status;

    lock_status = spinlock_lock(&del_lock);
    if (slist_is_empty(&del_task_head))
    {
        spinlock_set(&del_lock, lock_status);
        return;
    }
    /* Delete the queued tasks here. */
    slist_foreach_not_next(pos, &del_task_head, del_node)
    {
        /* Grab the successor before the current node is unlinked. */
        task_t *next = slist_next_entry(pos, &del_task_head, del_node);

        slist_del(&pos->del_node);
        {
            msg_tag_t tag;
            ipc_msg_t *msg = (ipc_msg_t *)knl_msg_buf;

            /* pid 0 is the kernel/init task itself — no notification. */
            if (pos->pid != 0)
            {
                msg->msg_buf[0] = 1; /* KILL_TASK */
                msg->msg_buf[1] = pos->pid;
                msg->msg_buf[2] = 0;
                if (thread_get_ipc_state(init_thread) != THREAD_IPC_ABORT)
                {
#define PM_PROT 0x0005
#define MAGIC_NS_USERPID 0xbabababa
                    entry_frame_t f;
                    f.regs[0] = msg_tag_init4(0, 3, 0, PM_PROT).raw;
                    f.regs[1] = 0;
                    f.regs[2] = 0x2222; /* two spare args, currently unused */
                    f.regs[3] = 0x3333;
                    tag = thread_fast_ipc_call(init_task, &f, MAGIC_NS_USERPID);
                    if (msg_tag_get_val(tag) < 0)
                    {
                        /* Fixed: format previously had a single %d for
                         * three arguments (__func__, __LINE__, value). */
                        printk("%s:%d init thread comm failed, ret:%d\n",
                               __func__, __LINE__, msg_tag_get_val(tag));
                    }
                }
            }
        }
        task_kill(pos);
        pos = next;
    }
    spinlock_set(&del_lock, lock_status);
}
static void knl_main(void)
{
umword_t status;
umword_t status2;
printk("knl main run..\n");
while (1)
{
task_t *pos;
if (slist_is_empty(&del_task_head))
if (slist_is_empty(&del_task_head) && slist_is_empty(&del_thread_head))
{
cpu_sleep();
continue;
}
status2 = spinlock_lock(&del_lock);
if (slist_is_empty(&del_task_head))
{
spinlock_set(&del_lock, status2);
continue;
}
// 在这里删除进程
slist_foreach_not_next(pos, &del_task_head, del_node)
{
task_t *next = slist_next_entry(pos, &del_task_head, del_node);
slist_del(&pos->del_node);
{
msg_tag_t tag;
umword_t user_id;
ipc_msg_t *msg = (ipc_msg_t *)knl_msg_buf;
if (pos->pid != 0)
{
msg->msg_buf[0] = 1; /*KILL_TASK*/
msg->msg_buf[1] = pos->pid;
msg->msg_buf[2] = 0;
if (thread_get_ipc_state(init_thread) != THREAD_IPC_ABORT)
{
#define PM_PROT 0x0005
#define MAGIC_NS_USERPID 0xbabababa
entry_frame_t f;
f.regs[0] = msg_tag_init4(0, 3, 0, PM_PROT).raw;
f.regs[1] = 0;
f.regs[2] = 0x2222; /*传递两个参数,没有用到,暂时用不上*/
f.regs[3] = 0x3333;
tag = thread_fast_ipc_call(init_task, &f, MAGIC_NS_USERPID);
if (msg_tag_get_val(tag) < 0)
{
printk("init thread comm failed, ret:%d\n", __func__, __LINE__, msg_tag_get_val(tag));
}
}
}
}
task_kill(pos);
pos = next;
}
spinlock_set(&del_lock, status2);
knl_del_task();
knl_release_thread();
}
}
static inline uint32_t thread_knl_get_current_run_nr(void)
@@ -157,10 +190,10 @@ void knl_init_1(void)
thread_init(knl_th, &root_factory_get()->limit, FALSE);
task_init(&knl_task, &root_factory_get()->limit, TRUE);
task_knl_init(&knl_task);
kobject_set_name(&knl_task.kobj, "tk_knl");
kobject_set_name(&knl_task.kobj, "knl");
thread_knl_pf_set(knl_th, knl_main);
thread_bind(knl_th, &knl_task.kobj);
kobject_set_name(&knl_th->kobj, "th_knl");
kobject_set_name(&knl_th->kobj, "knl");
thread_set_msg_buf(knl_th, knl_msg_buf[arch_get_current_cpu_id()],
knl_msg_buf[arch_get_current_cpu_id()]);
knl_th->cpu = arch_get_current_cpu_id();
@@ -189,6 +222,7 @@ static void knl_init_2(void)
{
mm_trace();
slist_init(&del_task_head);
slist_init(&del_thread_head);
#if IS_ENABLED(CONFIG_KNL_TEST)
knl_test();
#else
@@ -260,6 +294,11 @@ static void knl_init_2(void)
}
INIT_STAGE2(knl_init_2);
/*
 * Queue a terminated thread for deferred release by the kernel main
 * loop (knl_main), which drops the final reference later.
 *
 * NOTE(review): del_thread_head is appended to here without taking
 * del_lock, while the release path locks it — the caller appears to
 * hold the cpu lock (see thread.c); confirm this cannot race with the
 * release loop on SMP.
 */
void thread_knl_release_helper(thread_t *th)
{
    assert(th);
    slist_add_append(&del_thread_head, &th->release_node); // append to the deferred-release queue
}
bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl)
{
bool_t reset_ram = FALSE;
@@ -272,10 +311,6 @@ bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl)
status2 = spinlock_lock(&del_lock);
if (stack_len(&kill_thread->com->fast_ipc_stack) != 0)
{
// 在通信的时候出现了错误
// fast_ipc需要测试场景
// 1. 在ipc到其他进程中时其他进程死亡
// 2. 在ipc到其他进程中时当前进程死亡
int ret;
thread_fast_ipc_item_t ipc_item;
@@ -294,7 +329,7 @@ bool_t task_knl_kill(thread_t *kill_thread, bool_t is_knl)
thread_suspend(kill_thread);
kill_thread->ipc_status = THREAD_IPC_ABORT;
}
slist_add_append(&del_task_head, &task->del_node);
slist_add_append(&del_task_head, &task->del_node); // 添加到删除队列中
spinlock_set(&del_lock, status2);
}
else

View File

@@ -3,64 +3,180 @@
#include "syscall_backend.h"
#include <sys/socket.h>
#include <net_cli.h>
#include "fd_map.h"
/*
 * accept() backend: translate the user fd through the fd map, accept on
 * the private net fd, and register the new connection in the fd map.
 * Removes the stale early `return net_accept(s, ...)` that bypassed the
 * fd-map translation and made the rest of the function unreachable.
 */
int be_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    int fd = net_accept(u_fd.priv_fd, addr, addrlen);
    if (fd < 0)
    {
        return fd;
    }
    int user_fd = fd_map_alloc(0, fd, FD_FS);
    if (user_fd < 0)
    {
        /* No user mapping exists, so release the accepted socket via its
         * private fd; be_close(user_fd) on a negative handle leaked it.
         * TODO(review): confirm net_close is the matching release call. */
        net_close(fd);
    }
    return user_fd;
}
/*
 * bind() backend: translate the user fd through the fd map and bind on
 * the private net fd. Removes the stale early `return net_bind(s, ...)`
 * that passed the raw user fd straight to the net stack and made the
 * fd-map translation unreachable.
 */
int be_bind(int s, const struct sockaddr *name, socklen_t namelen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_bind(u_fd.priv_fd, name, namelen);
}
/*
 * shutdown() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_shutdown(s, how)` that bypassed
 * the fd-map translation.
 */
int be_shutdown(int s, int how)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_shutdown(u_fd.priv_fd, how);
}
/*
 * getpeername() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_getpeername(s, ...)` that
 * bypassed the fd-map translation.
 */
int be_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_getpeername(u_fd.priv_fd, name, namelen);
}
/*
 * getsockname() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_getsockname(s, ...)` that
 * bypassed the fd-map translation.
 */
int be_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_getsockname(u_fd.priv_fd, name, namelen);
}
/*
 * getsockopt() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_getsockopt(s, ...)` that
 * bypassed the fd-map translation.
 */
int be_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_getsockopt(u_fd.priv_fd, level, optname, optval, optlen);
}
/*
 * setsockopt() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_setsockopt(s, ...)` that
 * bypassed the fd-map translation.
 */
int be_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_setsockopt(u_fd.priv_fd, level, optname, optval, optlen);
}
/*
 * connect() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_connect(s, ...)` that bypassed
 * the fd-map translation.
 */
int be_connect(int s, const struct sockaddr *name, socklen_t namelen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_connect(u_fd.priv_fd, name, namelen);
}
/*
 * listen() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_listen(s, backlog)` that
 * bypassed the fd-map translation.
 */
int be_listen(int s, int backlog)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_listen(u_fd.priv_fd, backlog);
}
/*
 * recv() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_recv(s, ...)` that bypassed the
 * fd-map translation.
 */
ssize_t be_recv(int s, void *mem, size_t len, int flags)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_recv(u_fd.priv_fd, mem, len, flags);
}
/*
 * recvfrom() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_recvfrom(s, ...)` that bypassed
 * the fd-map translation.
 */
ssize_t be_recvfrom(int s, void *mem, size_t len, int flags,
                    struct sockaddr *from, socklen_t *fromlen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_recvfrom(u_fd.priv_fd, mem, len, flags, from, fromlen);
}
// ssize_t (*recvmsg)(int s, struct msghdr *message, int flags);
/*
 * send() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_send(s, ...)` that bypassed the
 * fd-map translation.
 */
ssize_t be_send(int s, const void *dataptr, size_t size, int flags)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_send(u_fd.priv_fd, dataptr, size, flags);
}
// ssize_t (*sendmsg)(int s, const struct msghdr *message, int flags);
/*
 * sendto() backend: translate the user fd through the fd map.
 * Removes the stale early `return net_sendto(s, ...)` that bypassed
 * the fd-map translation.
 */
ssize_t be_sendto(int s, const void *dataptr, size_t size, int flags,
                  const struct sockaddr *to, socklen_t tolen)
{
    fd_map_entry_t u_fd;
    int ret = fd_map_get(s, &u_fd);
    if (ret < 0)
    {
        return -EBADF;
    }
    return net_sendto(u_fd.priv_fd, dataptr, size, flags, to, tolen);
}
/*
 * socket() backend: create a private net fd and register it in the fd
 * map, returning the user-visible fd. Removes the stale early
 * `return net_socket(...)` that skipped the fd-map registration.
 */
int be_socket(int domain, int type, int protocol)
{
    int fd = net_socket(domain, type, protocol);
    if (fd < 0)
    {
        return fd;
    }
    int user_fd = fd_map_alloc(0, fd, FD_FS);
    if (user_fd < 0)
    {
        /* Mapping failed: no user fd exists, so close the private fd
         * directly; be_close(user_fd) on a negative handle leaked it.
         * TODO(review): confirm net_close is the matching release call. */
        net_close(fd);
    }
    return user_fd;
}

View File

@@ -798,18 +798,19 @@ sys_arch_protect(void)
* own counter (which is locked by the mutex). The return code is not actually
* used. */
// printf("thread:%d\n", lwprot_thread);
if (lwprot_thread != sys_thread_get_private_data_self())
{
/* We are locking the mutex where it has not been locked before *
* or is being locked by another thread */
// pthread_mutex_lock(&lwprot_mutex);
u_mutex_lock(&lwprot_mutex, 0, NULL);
lwprot_thread = sys_thread_get_private_data_self();
lwprot_count = 1;
}
else
/* It is already locked by THIS thread */
lwprot_count++;
// if (lwprot_thread != sys_thread_get_private_data_self())
// {
// /* We are locking the mutex where it has not been locked before *
// * or is being locked by another thread */
// // pthread_mutex_lock(&lwprot_mutex);
// u_mutex_lock(&lwprot_mutex, 0, NULL);
// lwprot_thread = sys_thread_get_private_data_self();
// lwprot_count = 1;
// }
// else
// /* It is already locked by THIS thread */
// lwprot_count++;
u_mutex_lock(&lwprot_mutex, 0, NULL);
return 0;
}
@@ -823,16 +824,17 @@ an operating system.
/*
 * Leave a lwIP "lightweight protection" critical section.
 *
 * The legacy lwprot_thread/lwprot_count bookkeeping has been replaced
 * by a plain mutex; keeping the old uncommented block as well caused a
 * conditional unlock followed by the unconditional one — a double
 * unlock. Only the mutex release remains.
 *
 * NOTE(review): without the nesting counter, nested protect/unprotect
 * pairs require u_mutex to be recursive — confirm.
 */
void sys_arch_unprotect(sys_prot_t pval)
{
    LWIP_UNUSED_ARG(pval);
    // if (lwprot_thread == sys_thread_get_private_data_self())
    // {
    //     lwprot_count--;
    //     if (lwprot_count == 0)
    //     {
    //         lwprot_thread = (umword_t)0xDEAD;
    //         // pthread_mutex_unlock(&lwprot_mutex);
    //         u_mutex_unlock(&lwprot_mutex);
    //     }
    // }
    u_mutex_unlock(&lwprot_mutex);
}
#endif /* SYS_LIGHTWEIGHT_PROT */

View File

@@ -677,7 +677,7 @@ lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
err = netconn_accept(sock->conn, &newconn);
if (err != ERR_OK) {
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err));
if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
if (sock->conn && NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
set_errno(EOPNOTSUPP);
} else if (err == ERR_CLSD) {
set_errno(EINVAL);

View File

@@ -42,7 +42,7 @@
/* We link to special sys_arch.c (for basic non-waiting API layers unit tests) */
#define NO_SYS 0
#define SYS_LIGHTWEIGHT_PROT 0
#define SYS_LIGHTWEIGHT_PROT 1
#define LWIP_NETCONN !NO_SYS
#define LWIP_SOCKET !NO_SYS
#define LWIP_NETCONN_FULLDUPLEX LWIP_SOCKET

View File

@@ -6,8 +6,11 @@
#include "syscall.h"
#include "atomic.h"
#include "libc.h"
#ifdef MKRTOS
#include "cons_cli.h"
#include "u_sig.h"
#include "u_task.h"
#endif
static void dummy(void) {}
weak_alias(dummy, _init);
@@ -147,10 +150,16 @@ weak void ipc_init(void)
static int libc_start_main_stage2(int (*main)(int, char **, char **), int argc, char **argv)
{
char **envp = argv + argc + 1;
#ifdef MKRTOS
fs_backend_init();
ipc_init();
if (argv && argv[0])
{
task_set_obj_name(TASK_THIS, TASK_THIS, argv[0]);
task_set_obj_name(TASK_THIS, THREAD_MAIN, argv[0]);
}
#endif
__libc_start_init();
/* Pass control to the application */
exit(main(argc, argv, envp));
return 0;

View File

@@ -147,7 +147,7 @@ int u_fast_ipc_init(uint8_t *stack_array, uint8_t *msg_buf_array, int stack_msgb
cons_map_buf[i] = vpage_create_raw3(0, 0, handler_alloc()).raw;
msg->map_buf[i] = cons_map_buf[i];
}
#if 1
#if 0
for (int i = 0; i < stack_msgbuf_array_num; i++)
{
printf("stack 0x%x %x\n", stack_array + stack_size * i, stack_size);

View File

@@ -9,7 +9,6 @@
#include "ff.h" /* Obtains integer types */
#include "diskio.h" /* Declarations of disk functions */
#include "ram_disk.h"
#include <stdio.h>
#include "blk_drv_cli.h"
#include "ns_cli.h"
@@ -20,7 +19,7 @@
#include <string.h>
#include "u_share_mem.h"
/* Definitions of physical drive number for each drive */
#define DEV_RAM 0 /* Example: Map Ramdisk to physical drive 0 */
#define DEV_MK_BLOCK 0 /* Example: Map Ramdisk to physical drive 0 */
static obj_handler_t dev_hd;
static obj_handler_t shm_hd;
@@ -30,9 +29,9 @@ static blk_drv_info_t blk_info;
int disk_set_dev_path(int pdrv, const char *dev)
{
int ret;
switch (DEV_RAM)
switch (DEV_MK_BLOCK)
{
case DEV_RAM:
case DEV_MK_BLOCK:
{
int try_cn = 0;
again:
@@ -40,11 +39,11 @@ int disk_set_dev_path(int pdrv, const char *dev)
if (ret < 0)
{
try_cn++;
if (try_cn > 10)
if (try_cn > 100)
{
return ret;
}
u_sleep_ms(20);
u_sleep_ms(5);
goto again;
}
ret = blk_drv_cli_info(dev_hd, &blk_info);
@@ -72,7 +71,7 @@ DSTATUS disk_status(
switch (pdrv)
{
case DEV_RAM:
case DEV_MK_BLOCK:
result = 0;
// translate the reslut code here
@@ -95,7 +94,7 @@ DSTATUS disk_initialize(
switch (pdrv)
{
case DEV_RAM:
case DEV_MK_BLOCK:
{
msg_tag_t tag;
@@ -143,7 +142,7 @@ DRESULT disk_read(
switch (pdrv)
{
case DEV_RAM:
case DEV_MK_BLOCK:
// translate the reslut code here
for (umword_t i = sector; i < sector + count; i++)
{
@@ -176,7 +175,7 @@ DRESULT disk_write(
switch (pdrv)
{
case DEV_RAM:
case DEV_MK_BLOCK:
// translate the arguments here
for (umword_t i = sector; i < sector + count; i++)
{
@@ -209,7 +208,7 @@ DRESULT disk_ioctl(
switch (pdrv)
{
case DEV_RAM:
case DEV_MK_BLOCK:
{
switch (cmd)
{ // fatfs内核使用cmd调用

View File

@@ -174,9 +174,13 @@ int parse_cfg(const char *parse_cfg_file_name, uenv_t *env)
for (int i = 0; i < cmd_params_num; i++)
{
args[i] = &cmd_line[cmd_params_off[i]];
#if 0
printf("parse_cfg args[%d] = %s\n", i, args[i]);
#endif
}
#if 0
printf("parse_cfg cmd_params_num:%d\n", cmd_params_num);
#endif
int ret = app_load(cmd_line, env, &pid, args, cmd_params_num,
NULL, 0, mem_block);
if (ret < 0)