From 0d65637d917ad751f3cf849f7fed0d7bf4a47d93 Mon Sep 17 00:00:00 2001 From: zhangzheng <1358745329@qq.comwq> Date: Thu, 29 Aug 2024 23:35:25 +0800 Subject: [PATCH] aarch64 support share_mem obj. --- .gitignore | 1 + .vscode/settings.json | 4 +- mkrtos_knl/arch/aarch64/thread_armv8.c | 4 +- mkrtos_knl/inc/knl/vma.h | 17 +- mkrtos_knl/knl/mm/vma.c | 114 ++++-- mkrtos_knl/knl/mm/vma_obj.c | 2 +- mkrtos_knl/knl/share_mem.c | 382 ++++++++++++++---- mkrtos_script/debug_aarch64_qemu.sh | 3 +- mkrtos_user/lib/sys/inc/u_factory.h | 9 +- mkrtos_user/lib/sys/inc/u_share_mem.h | 3 +- mkrtos_user/lib/sys/inc/u_vmam.h | 2 +- mkrtos_user/lib/sys/src/u_factory.c | 4 +- mkrtos_user/lib/sys/src/u_share_mem.c | 5 +- .../server/init/src/test/share_mem_test.c | 57 +++ mkrtos_user/server/init/src/test/test.h | 1 + mkrtos_user/server/init/src/test/test_main.c | 5 +- mkrtos_user/server/net/src/ethernetif.c | 7 +- mkrtos_user/server/net/src/main.c | 5 +- mkrtos_user/server/test/src/share_mem_test.c | 24 -- 19 files changed, 493 insertions(+), 156 deletions(-) create mode 100644 mkrtos_user/server/init/src/test/share_mem_test.c delete mode 100644 mkrtos_user/server/test/src/share_mem_test.c diff --git a/.gitignore b/.gitignore index 390a2b623..ae1cc4f39 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ build/* .config.old mkrtos_knl/stm32_link.lds benos +.vscode diff --git a/.vscode/settings.json b/.vscode/settings.json index 0180aab3d..5b266fda6 100755 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -99,7 +99,9 @@ "stdio.h": "c", "u_thread_util.h": "c", "u_hd_man.h": "c", - "u_mm.h": "c" + "u_mm.h": "c", + "sema.h": "c", + "ref.h": "c" }, "cortex-debug.showRTOS": false, "cortex-debug.variableUseNaturalFormat": false, diff --git a/mkrtos_knl/arch/aarch64/thread_armv8.c b/mkrtos_knl/arch/aarch64/thread_armv8.c index ca4d08812..973196974 100644 --- a/mkrtos_knl/arch/aarch64/thread_armv8.c +++ b/mkrtos_knl/arch/aarch64/thread_armv8.c @@ -108,7 +108,7 @@ void thread_sync_entry(entry_frame_t *regs) case FSC_TRANS_FAULT_LEVEL1: case FSC_TRANS_FAULT_LEVEL2: case FSC_TRANS_FAULT_LEVEL3: - ret = task_vma_page_fault(&tk->mm_space.mem_vma, ALIGN_DOWN(addr, PAGE_SIZE)); + ret = task_vma_page_fault(&tk->mm_space.mem_vma, ALIGN_DOWN(addr, PAGE_SIZE), NULL); if (ret < 0) { printk("[knl] inst abort 0x20 pfa:0x%lx\n", addr); @@ -128,7 +128,7 @@ void thread_sync_entry(entry_frame_t *regs) case FSC_TRANS_FAULT_LEVEL1: case FSC_TRANS_FAULT_LEVEL2: case FSC_TRANS_FAULT_LEVEL3: - ret = task_vma_page_fault(&tk->mm_space.mem_vma, ALIGN_DOWN(addr, PAGE_SIZE)); + ret = task_vma_page_fault(&tk->mm_space.mem_vma, ALIGN_DOWN(addr, PAGE_SIZE), NULL); if (ret < 0) { dump_stack(regs->pc, regs->regs[29]); diff --git a/mkrtos_knl/inc/knl/vma.h b/mkrtos_knl/inc/knl/vma.h index c4ed85636..82e0a9643 100644 --- a/mkrtos_knl/inc/knl/vma.h +++ b/mkrtos_knl/inc/knl/vma.h @@ -17,7 +17,7 @@ enum vpage_prot_attrs VPAGE_PROT_IN_KNL = 0x20, //!< 内核中使用 }; -#define VMA_ADDR_RESV 0x1 //!< 保留内存 +#define VMA_ADDR_RESV 0x1 //!< 保留内存 // #define VMA_ADDR_UNCACHE 0x2 //!< uncache内存 #define VMA_USED_NODE 0x1 //!< 该vma节点被使用,非空闲 @@ -33,7 +33,7 @@ typedef union vma_addr umword_t addr : (sizeof(void *) * 8 - PAGE_SHIFT); }; } vma_addr_t; -static inline vma_addr_t vam_addr_create_raw(umword_t raw) +static inline vma_addr_t vma_addr_create_raw(umword_t raw) { return (vma_addr_t){ .raw = raw, @@ -151,6 +151,17 @@ int task_vma_grant(task_vma_t *src_task_vma, task_vma_t *dst_task_vma, */ int task_vma_free(task_vma_t *task_vma, vaddr_t addr, size_t size); +/** 
+ * @brief 释放掉已经申请的物理内存,但是不释放虚拟内存 + * + * @param task_vma + * @param addr + * @param size 释放的大小 + * @param is_free_mem 是否释放内存 + * @return int + */ +int task_vma_free_pmem(task_vma_t *task_vma, vaddr_t addr, size_t size, bool_t is_free_mem); + /** * @brief 缺页的处理流程 * 1.查找已经分配的表中是否存在 @@ -160,7 +171,7 @@ int task_vma_free(task_vma_t *task_vma, vaddr_t addr, size_t size); * @param addr * @return int */ -int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr); +int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr, void *paddr); /** * @brief 释放task_vma diff --git a/mkrtos_knl/knl/mm/vma.c b/mkrtos_knl/knl/mm/vma.c index 646f3836a..01f58cfd8 100644 --- a/mkrtos_knl/knl/mm/vma.c +++ b/mkrtos_knl/knl/mm/vma.c @@ -839,6 +839,7 @@ typedef struct vma_idl_tree_iter_del_params size_t size; mln_rbtree_t *r_tree; mm_space_t *mm_space; + bool_t is_free_mem; } vma_idl_tree_iter_del_params_t; /** * @brief 迭代删除 @@ -858,7 +859,10 @@ static int rbtree_iterate_alloc_tree_del(mln_rbtree_node_t *node, void *udata) // 解除映射 unmap_mm(mm_space_get_pdir(param->mm_space), vma_addr_get_addr(node_data->vaddr), PAGE_SHIFT, 1); - buddy_free(buddy_get_alloter(), (void *)vma_node_get_paddr(node_data)); + if (param->is_free_mem) + { + buddy_free(buddy_get_alloter(), (void *)vma_node_get_paddr(node_data)); + } #if VMA_DEBUG2 printk("free page: vaddr:0x%lx pmem:0x%lx\n", vma_addr_get_addr(node_data->vaddr), vma_node_get_paddr(node_data)); #endif @@ -868,6 +872,28 @@ static int rbtree_iterate_alloc_tree_del(mln_rbtree_node_t *node, void *udata) } return 0; } +/** + * @brief 释放掉已经申请的物理内存,但是不释放虚拟内存 + * + * @param task_vma + * @param addr + * @param size 释放的大小 + * @param is_free_mem 是否释放内存 + * @return int + */ +int task_vma_free_pmem(task_vma_t *task_vma, vaddr_t addr, size_t size, bool_t is_free_mem) +{ + /*释放已经分配的物理内存,并解除mmu的映射*/ + vma_idl_tree_iter_del_params_t param = { + .r_tree = &task_vma->alloc_tree, + .mm_space = container_of(task_vma, mm_space_t, mem_vma), + .addr = addr, + .size = size, + .is_free_mem = is_free_mem, + }; + mln_rbtree_iterate(&task_vma->alloc_tree, rbtree_iterate_alloc_tree_del, ¶m); + return 0; +} /** * @brief 释放申请的虚拟内存,并释放已经申请的物理内存 * 1.从分配树中找到需要的节点 @@ -918,13 +944,7 @@ int task_vma_free(task_vma_t *task_vma, vaddr_t addr, size_t size) goto end; } /*释放已经分配的物理内存,并解除mmu的映射*/ - vma_idl_tree_iter_del_params_t param = { - .r_tree = &task_vma->alloc_tree, - .mm_space = container_of(task_vma, mm_space_t, mem_vma), - .addr = addr, - .size = size, - }; - mln_rbtree_iterate(&task_vma->alloc_tree, rbtree_iterate_alloc_tree_del, ¶m); + task_vma_free_pmem(task_vma, addr, size, TRUE); vma_node_set_unused(node_data); // 设置未使用 #if VMA_DEBUG printk("free pre:\n"); @@ -950,11 +970,12 @@ end: * 1.查找已经分配的表中是否存在 * 2.分配物理内存 * 3.插入到已分配树中去 - * @param task_vma - * @param addr - * @return int + * @param task_vma 缺页的进程vma + * @param addr 缺页的虚拟地址 + * @param paddr 如果该值不为NULL,则page_fault的物理地址才用该地址 + * @return int <0 failed, >=0 success */ -int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr) +int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr, void *paddr) { mln_rbtree_node_t *find_node = NULL; vma_t *node_data = NULL; @@ -964,6 +985,9 @@ int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr) int ret; assert(task_vma); + assert((((vaddr_t)addr) & (PAGE_SIZE - 1)) == 0); + assert((((paddr_t)paddr) & (PAGE_SIZE - 1)) == 0); + lock_status = task_vma_lock(task_vma); if (lock_status < 0) { @@ -990,34 +1014,41 @@ int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr) ret = -ENOENT; goto end; } - // 2.申请物理内存 
- if (vma_addr_get_flags(node_data->vaddr) & VMA_ADDR_RESV) + if (!paddr) { - if (!vma_node_get_paddr(node_data)) + // 2.申请物理内存 + if (vma_addr_get_flags(node_data->vaddr) & VMA_ADDR_RESV) { - mem = NULL; + if (!vma_node_get_paddr(node_data)) + { + mem = NULL; + } + else + { + mem = (void *)(addr - vma_addr_get_addr(node_data->vaddr) + + vma_node_get_paddr(node_data)); + } } else { - mem = (void *)(addr - vma_addr_get_addr(node_data->vaddr) + - vma_node_get_paddr(node_data)); + mem = buddy_alloc(buddy_get_alloter(), PAGE_SIZE); + if (mem) + { + memset(mem, 0, PAGE_SIZE); +#if VMA_DEBUG2 + printk("alloc pmem:0x%lx\n", mem); +#endif + } + else + { + printk("alloc pmem failed.\n"); + // mem = buddy_alloc(buddy_get_alloter(), PAGE_SIZE); + } } } else { - mem = buddy_alloc(buddy_get_alloter(), PAGE_SIZE); - if (mem) - { - memset(mem, 0, PAGE_SIZE); -#if VMA_DEBUG2 - printk("alloc pmem:0x%lx\n", mem); -#endif - } - else - { - printk("alloc pmem failed.\n"); - // mem = buddy_alloc(buddy_get_alloter(), PAGE_SIZE); - } + mem = paddr; } if (!mem) { @@ -1033,10 +1064,7 @@ int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr) PAGE_SIZE, (paddr_t)mem); if (alloc_node == NULL) { - if (!(vma_addr_get_flags(node_data->vaddr) & VMA_ADDR_RESV)) - { - buddy_free(buddy_get_alloter(), mem); - } + ret = -ENOMEM; goto end; } @@ -1046,18 +1074,28 @@ int task_vma_page_fault(task_vma_t *task_vma, vaddr_t addr) vpage_attrs_to_page_attrs(vma_addr_get_prot(node_data->vaddr))); if (ret < 0) { - vma_node_free(&task_vma->alloc_tree, alloc_node); ret = -ENOMEM; - goto end; + goto end2; } #if VMA_DEBUG2 printk("page falut: vaddr:0x%lx alloc mem:0x%lx\n", addr, mem); #endif task_vma->alloc_tree.cmp = vma_idl_tree_insert_cmp_handler; mln_rbtree_insert(&task_vma->alloc_tree, alloc_node); - flush_all_tlb(); + flush_all_tlb(); // TODO: ret = 0; + goto _ok; +end2: + vma_node_free(&task_vma->alloc_tree, alloc_node); end: + if (!paddr) + { + if (!(vma_addr_get_flags(node_data->vaddr) & VMA_ADDR_RESV)) + { + buddy_free(buddy_get_alloter(), mem); + } + } +_ok: task_vma_unlock(task_vma, lock_status); return ret; } diff --git a/mkrtos_knl/knl/mm/vma_obj.c b/mkrtos_knl/knl/mm/vma_obj.c index 236b2227e..52fa53717 100644 --- a/mkrtos_knl/knl/mm/vma_obj.c +++ b/mkrtos_knl/knl/mm/vma_obj.c @@ -57,7 +57,7 @@ static void vma_obj_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t in_ vaddr_t ret_vaddr = 0; ret = task_vma_alloc(&tk->mm_space.mem_vma, - vam_addr_create_raw(f->regs[0]), f->regs[1], + vma_addr_create_raw(f->regs[0]), f->regs[1], f->regs[2], &ret_vaddr); f->regs[0] = msg_tag_init4(0, 0, 0, ret).raw; f->regs[1] = ret_vaddr; diff --git a/mkrtos_knl/knl/share_mem.c b/mkrtos_knl/knl/share_mem.c index d795a2996..dc63648cb 100644 --- a/mkrtos_knl/knl/share_mem.c +++ b/mkrtos_knl/knl/share_mem.c @@ -13,10 +13,17 @@ #include #include #include +#include +#include #include "mm_wrap.h" #include "mpu.h" #include "init.h" #include "dlist.h" +#if IS_ENABLED(CONFIG_BUDDY_SLAB) +#include +static slab_t *share_mem_slab; +static slab_t *share_mem_task_node_slab; +#endif /** * @brief 存储task的节点 @@ -26,18 +33,31 @@ typedef struct share_mem_task_node { dlist_item_t node; task_t *task; + vaddr_t addr; } share_mem_task_node_t; + +typedef enum share_mem_type +{ + SHARE_MEM_CNT_BUDDY_CNT, //!< buddy分配的连续内存,最大4M,默认的内存分配方式 + SHARE_MEM_CNT_CMA_CNT, //!< cma分配的连续内存 + SHARE_MEM_CNT_DPD, //!< 离散内存 +} share_mem_type_t; /** * @brief 共享内存对象 * */ typedef struct share_mem { - kobject_t kobj; //!< 内核对象 - void *mem; //!< 内存指针 - size_t size; //!< 内存大小,为n^2。 - 
dlist_head_t task_head; //!< 哪些任务使用了该共享内存 - ram_limit_t *lim; //!< 内存限额 + kobject_t kobj; //!< 内核对象 + struct + { + void *mem; //!< 内存指针(连续内存时使用) + void **mem_array; //!< 离散内存时当数组地址使用,数组内存放离散内存的地址 + }; + size_t size; //!< 内存大小,(根据arch不同,size大小可能会被限制为,例如:n^2)。 + dlist_head_t task_head; //!< 哪些任务使用了该共享内存 + share_mem_type_t mem_type; //!< 内存类型 + ram_limit_t *lim; //!< 内存限额 } share_mem_t; enum share_mem_op @@ -45,20 +65,32 @@ enum share_mem_op SHARE_MEM_MAP, SHARE_MEM_UNMAP, }; + +static void share_mem_slab_init(void) +{ +#if IS_ENABLED(CONFIG_BUDDY_SLAB) + share_mem_slab = slab_create(sizeof(share_mem_t), "share_mem"); + assert(share_mem_slab); + share_mem_task_node_slab = slab_create(sizeof(share_mem_task_node_t), "share_mem_task_node"); + sizeof(share_mem_task_node_slab); +#endif +} +INIT_KOBJ_MEM(share_mem_slab_init); /** * @brief 共享内存解除映射 * * @param sm * @return int */ -static void share_mem_unmap_task(share_mem_t *sm) +static void *share_mem_unmap_task(share_mem_t *sm) { + vaddr_t vaddr = 0; share_mem_task_node_t *pos; umword_t status = spinlock_lock(&sm->kobj.lock); if (status < 0) { - return; + return NULL; } task_t *tk = thread_get_current_task(); dlist_foreach(&sm->task_head, pos, share_mem_task_node_t, node) @@ -66,18 +98,24 @@ static void share_mem_unmap_task(share_mem_t *sm) if (pos->task == tk) { dlist_del(&sm->task_head, &pos->node); +#if IS_ENABLED(CONFIG_MMU) + mm_limit_free_slab(share_mem_task_node_slab, tk->lim, pos); +#else mm_limit_free(tk->lim, pos); +#endif + vaddr = pos->addr; break; } } spinlock_set(&sm->kobj.lock, status); + return (void *)vaddr; } /** * @brief 共享内存映射 * * @param task */ -static int share_mem_map_task(share_mem_t *sm) +static int share_mem_map_task(share_mem_t *sm, vaddr_t addr) { int ret = -1; int flag = 0; @@ -102,8 +140,12 @@ static int share_mem_map_task(share_mem_t *sm) if (!flag) { // 没有找到则插入一个新的 - share_mem_task_node_t *task_node = mm_limit_alloc(tk->lim, sizeof(share_mem_task_node_t)); - + share_mem_task_node_t *task_node; +#if IS_ENABLED(CONFIG_MMU) + task_node = mm_limit_alloc_slab(share_mem_task_node_slab, tk->lim); +#else + task_node = mm_limit_alloc(tk->lim, sizeof(share_mem_task_node_t)); +#endif if (!task_node) { // 内存分配失败 @@ -114,6 +156,7 @@ static int share_mem_map_task(share_mem_t *sm) } dlist_item_init(&task_node->node); task_node->task = tk; + task_node->addr = addr; dlist_add_head(&sm->task_head, &task_node->node); ref_counter_inc(&tk->ref_cn); ret = 0; @@ -126,9 +169,209 @@ end: spinlock_set(&sm->kobj.lock, status); return ret; } +static int share_mem_free_pmem(share_mem_t *obj) +{ + assert(obj); + switch (obj->mem_type) + { + case SHARE_MEM_CNT_BUDDY_CNT: +#if IS_ENABLED(CONFIG_MMU) + mm_limit_free_buddy(obj->lim, obj->mem, obj->size); +#else + mm_limit_free_align(obj->lim, obj->mem, obj->size); +#endif + break; + case SHARE_MEM_CNT_CMA_CNT: + /*TODO:support CMA mem.*/ + return -ENOSYS; + case SHARE_MEM_CNT_DPD: + { + for (ssize_t st = 0; st < obj->size; st += PAGE_SIZE) + { + mm_limit_free_buddy(obj->lim, obj->mem_array[st / PAGE_SIZE], PAGE_SIZE); + } + int mem_cnt = ROUND_UP(obj->size, PAGE_SIZE); + size_t mem_array_size = ALIGN(mem_cnt * sizeof(void *), PAGE_SIZE); + mm_limit_free_buddy(obj->lim, obj->mem_array, mem_array_size); + } + break; + } + return 0; +} +/** + * @brief 申请物理内存 + * + * @param obj + * @return int + */ +static int share_mem_alloc_pmem(share_mem_t *obj) +{ + assert(obj); + if (obj->mem) + { + return 0; + } + switch (obj->mem_type) + { + case SHARE_MEM_CNT_BUDDY_CNT: +#if IS_ENABLED(CONFIG_MMU) + obj->mem = 
mm_limit_alloc_buddy(obj->lim, obj->size); +#else + int align_size = obj->size; + +#if CONFIG_MK_MPU_CFG +#if CONFIG_MPU_VERSION == 1 + if (obj->size < (1UL << CONFIG_PAGE_SHIFT) || !is_power_of_2(obj->size)) + { + //!< 大小必须是2的整数倍 + return -EINVAL; + } + align_size = obj->size; +#elif CONFIG_MPU_VERSION == 2 + if (obj->size < MPU_ALIGN_SIZE || (obj->size & (MPU_ALIGN_SIZE - 1))) + { + //!< 大小必须是2的整数倍 + return -EINVAL; + } + obj->size += MPU_ALIGN_SIZE; + align_size = MPU_ALIGN_SIZE; +#endif +#else + align_size = sizeof(void *); +#endif + + obj->mem = mm_limit_alloc_align(lim, obj->size, align_size); +#endif + if (obj->mem == NULL) + { + return -ENOMEM; + } + memset(obj->mem, 0, obj->size); + break; + case SHARE_MEM_CNT_CMA_CNT: + /*TODO:support CMA mem.*/ + return -ENOSYS; + case SHARE_MEM_CNT_DPD: + { + /** 非连续内存,按页申请 */ + int mem_cnt = ROUND_UP(obj->size, PAGE_SIZE); + size_t mem_array_size = ALIGN(mem_cnt * sizeof(void *), PAGE_SIZE); + + obj->mem_array = (void **)mm_limit_alloc_buddy(obj->lim, mem_array_size); + if (!obj->mem_array) + { + return -ENOMEM; + } + memset(obj->mem_array, 0, mem_array_size); + for (int i = 0; i < mem_cnt; i++) + { + obj->mem_array[i] = mm_limit_alloc_buddy(obj->lim, PAGE_SIZE); + if (obj->mem_array[i] == NULL) + { + /* 内存不足,释放申请的内存 */ + mm_limit_free_buddy(obj->lim, obj->mem_array, mem_array_size); + for (int j = 0; j < i; j++) + { + mm_limit_free_buddy(obj->lim, obj->mem_array[j], PAGE_SIZE); + } + obj->mem_array = NULL; + return -ENOMEM; + } + memset(obj->mem_array[i], 0, PAGE_SIZE); + } + } + break; + } + return 0; +} +/** + * @brief 共享内存执行映射操作 + * + * @param obj 共享内存对象 + * @param addr 映射到的虚拟地址,以及属性 + * @return int + */ +static ssize_t share_mem_map(share_mem_t *obj, vma_addr_t addr, vaddr_t *ret_vaddr) +{ + int ret; + assert(obj); + assert(ret_vaddr); + ssize_t map_size = 0; + task_t *task = thread_get_current_task(); + +#if IS_ENABLED(CONFIG_MMU) + vaddr_t ret_addr; + + addr.flags |= VMA_ADDR_RESV; //!< 设置为保留内存,保留内存的物理内存,不在内核中申请 + ret = task_vma_alloc(&task->mm_space.mem_vma, addr, obj->size, 0, &ret_addr); //!< 申请虚拟内存 + if (ret < 0) + { + return ret; + } + switch (obj->mem_type) + { + case SHARE_MEM_CNT_BUDDY_CNT: + case SHARE_MEM_CNT_CMA_CNT: + { + for (ssize_t st = ret_addr; st < ret_addr + obj->size; st += PAGE_SIZE, map_size += PAGE_SIZE) + { + ret = task_vma_page_fault(&task->mm_space.mem_vma, st, obj->mem + (st - ret_addr)); + if (ret < 0) + { + printk("%s:%d task map failed, pmem:0x%lx vmem:0x%lx.\n", obj->mem + (st - ret_addr), st); + break; + } + } + } + break; + case SHARE_MEM_CNT_DPD: + { + for (ssize_t st = ret_addr; st < ret_addr + obj->size; st += PAGE_SIZE, map_size += PAGE_SIZE) + { + ret = task_vma_page_fault(&task->mm_space.mem_vma, st, obj->mem_array[(st - ret_addr) / PAGE_SIZE]); + if (ret < 0) + { + printk("%s:%d task map failed, pmem:0x%lx vmem:0x%lx.\n", obj->mem + (st - ret_addr), st); + break; + } + } + } + break; + } + *ret_vaddr = ret_addr; + if (map_size == 0) + { + return ret; + } +#else + bool_t _ret = mm_space_add(&task->mm_space, (umword_t)(sm->mem), sm->size, attr); + + if (_ret) + { + mpu_switch_to_task(task); + } + map_size = _ret == TRUE ? 
0 : -ENOMEM; + *ret_vaddr = sm->mem; +#endif + return map_size; +} +static int share_mem_unmap(share_mem_t *obj, vaddr_t vaddr) +{ + task_t *task = thread_get_current_task(); + +#if IS_ENABLED(CONFIG_MMU) + task_vma_free_pmem(&task->mm_space.mem_vma, vaddr, obj->size, FALSE); +#else + // 共享内存解除映射 + mm_space_del(&task->mm_space, (umword_t)obj->mem); + mpu_switch_to_task(task); +#endif + return 0; +} static void share_mem_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t in_tag, entry_frame_t *f) { + ssize_t ret; msg_tag_t tag = msg_tag_init4(0, 0, 0, -EINVAL); task_t *task = thread_get_current_task(); share_mem_t *sm = container_of(kobj, share_mem_t, kobj); @@ -142,42 +385,44 @@ static void share_mem_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t i { case SHARE_MEM_MAP: { - uint8_t attr; - - attr = f->regs[0] & 0xff; - int map_ret = share_mem_map_task(sm); - - if (map_ret >= 0) + vma_addr_t vma_addr; + vaddr_t ret_vaddr; + ret = share_mem_alloc_pmem(sm); //!< 如果内存没有申请,则申请内存 + if (ret < 0) { - if (map_ret == 0) - { - // 共享内存映射 - bool_t ret = mm_space_add(&task->mm_space, (umword_t)(sm->mem), sm->size, attr); - - if (ret) - { - mpu_switch_to_task(task); - } - else - { - share_mem_unmap_task(sm); - map_ret == -EAGAIN; - } - } + goto end; } - f->regs[1] = (umword_t)sm->mem; + + vma_addr = vma_addr_create_raw(f->regs[0]); + ret = share_mem_map(sm, vma_addr, &ret_vaddr); //!< 执行映射操作 + if (ret < 0) + { + goto end; + } + ret = share_mem_map_task(sm, ret_vaddr); //!< 存储那些task使用了该share mem. + if (ret < 0) + { + share_mem_unmap(sm, ret_vaddr); + ret = -EAGAIN; + goto end; + } + f->regs[1] = (umword_t)ret_vaddr; f->regs[2] = sm->size; - tag = msg_tag_init4(0, 0, 0, map_ret); + end: + tag = msg_tag_init4(0, 0, 0, ret); } break; case SHARE_MEM_UNMAP: { + vaddr_t addr; + // 从记录中删除 - share_mem_unmap_task(sm); - // 共享内存解除映射 - mm_space_del(&task->mm_space, (umword_t)sm->mem); - mpu_switch_to_task(task); - ref_counter_dec_and_release(&task->ref_cn, &task->kobj); + addr = (vaddr_t)share_mem_unmap_task(sm); + if (addr) + { + share_mem_unmap(sm, addr); + ref_counter_dec_and_release(&task->ref_cn, &task->kobj); + } tag = msg_tag_init4(0, 0, 0, 0); } break; @@ -185,17 +430,19 @@ static void share_mem_syscall(kobject_t *kobj, syscall_prot_t sys_p, msg_tag_t i f->regs[0] = tag.raw; } -static void share_mem_unmap(obj_space_t *obj_space, kobject_t *kobj) +static void share_mem_obj_unmap(obj_space_t *obj_space, kobject_t *kobj) { task_t *task = container_of(obj_space, task_t, obj_space); share_mem_t *sm = container_of(kobj, share_mem_t, kobj); + vaddr_t addr; // 从记录中删除 - share_mem_unmap_task(sm); - // 共享内存解除映射 - mm_space_del(&task->mm_space, (umword_t)sm->mem); - mpu_switch_to_task(task); - ref_counter_dec_and_release(&task->ref_cn, &task->kobj); + addr = (vaddr_t)share_mem_unmap_task(sm); + if (addr) + { + share_mem_unmap(sm, addr); + ref_counter_dec_and_release(&task->ref_cn, &task->kobj); + } } static void share_mem_release_stage1(kobject_t *kobj) { @@ -208,8 +455,13 @@ static void share_mem_release_stage2(kobject_t *kobj) assert(dlist_is_empty(&sm->task_head)); +#if IS_ENABLED(CONFIG_MMU) + share_mem_free_pmem(sm); + mm_limit_free_slab(share_mem_slab, sm->lim, sm); +#else mm_limit_free_align(sm->lim, sm->mem, sm->size); mm_limit_free(sm->lim, sm); +#endif printk("share mem 0x%x free.\n", sm); } /** @@ -221,8 +473,9 @@ static void share_mem_release_stage2(kobject_t *kobj) static void share_mem_init(share_mem_t *sm, umword_t max) { kobject_init(&sm->kobj, SHARE_MEM_TYPE); + sm->size = max; 
sm->kobj.invoke_func = share_mem_syscall; - sm->kobj.unmap_func = share_mem_unmap; + sm->kobj.unmap_func = share_mem_obj_unmap; sm->kobj.stage_1_func = share_mem_release_stage1; sm->kobj.stage_2_func = share_mem_release_stage2; } @@ -233,44 +486,29 @@ static void share_mem_init(share_mem_t *sm, umword_t max) * @param max * @return share_mem_t* */ -static share_mem_t *share_mem_create(ram_limit_t *lim, size_t max) +static share_mem_t *share_mem_create(ram_limit_t *lim, share_mem_type_t type, size_t max) { - int align_size = max; + share_mem_t *mem; -#if CONFIG_MK_MPU_CFG -#if CONFIG_MPU_VERSION == 1 - if (max < (1UL << CONFIG_PAGE_SHIFT) || !is_power_of_2(max)) +#if IS_ENABLED(CONFIG_MMU) + mem = mm_limit_alloc_slab(share_mem_slab, lim); + + if (mem == NULL) { - //!< 大小必须是2的整数倍 return NULL; } - align_size = max; -#elif CONFIG_MPU_VERSION == 2 - if (max < MPU_ALIGN_SIZE || (max & (MPU_ALIGN_SIZE - 1))) - { - //!< 大小必须是2的整数倍 - return NULL; - } - max += MPU_ALIGN_SIZE; - align_size = MPU_ALIGN_SIZE; -#endif + memset(mem, 0, sizeof(share_mem_t)); + max = ALIGN(max, (1 << CONFIG_PAGE_SHIFT)); #else - align_size = sizeof(void *); -#endif - share_mem_t *mem = mm_limit_alloc(lim, sizeof(share_mem_t)); + mem = mm_limit_alloc(lim, sizeof(share_mem_t)); if (!mem) { return NULL; } - mem->mem = mm_limit_alloc_align(lim, max, align_size); - if (!mem->mem) - { - mm_limit_free(lim, mem); - return NULL; - } - mem->size = max; +#endif mem->lim = lim; + mem->mem_type = type; share_mem_init(mem, max); return mem; } @@ -287,7 +525,7 @@ static share_mem_t *share_mem_create(ram_limit_t *lim, size_t max) static kobject_t *share_mem_func(ram_limit_t *lim, umword_t arg0, umword_t arg1, umword_t arg2, umword_t arg3) { - share_mem_t *irq = share_mem_create(lim, arg0); + share_mem_t *irq = share_mem_create(lim, arg0, arg1); if (!irq) { diff --git a/mkrtos_script/debug_aarch64_qemu.sh b/mkrtos_script/debug_aarch64_qemu.sh index f0500d598..453d24d86 100755 --- a/mkrtos_script/debug_aarch64_qemu.sh +++ b/mkrtos_script/debug_aarch64_qemu.sh @@ -8,10 +8,9 @@ fi # -machine virt,virtualization=on,gic-version=2,highmem=off,secure=off,dumpdtb=virt.dtb qemu-system-aarch64 \ -machine virt,virtualization=on,gic-version=2,highmem=off,secure=off\ - -device virtio-gpu-pci \ -cpu cortex-a57 \ - -nographic \ -m size=512 \ + -nographic \ -smp 4\ -kernel $PWD/build/output/bootstrap.elf \ -S -gdb tcp::$1 diff --git a/mkrtos_user/lib/sys/inc/u_factory.h b/mkrtos_user/lib/sys/inc/u_factory.h index 62c72eccf..2ee0ae7e2 100644 --- a/mkrtos_user/lib/sys/inc/u_factory.h +++ b/mkrtos_user/lib/sys/inc/u_factory.h @@ -5,10 +5,17 @@ #define THREAD_CREATE_VM 0x1 +typedef enum share_mem_type +{ + SHARE_MEM_CNT_BUDDY_CNT, //!< buddy分配的连续内存,最大4M,默认的内存分配方式 + SHARE_MEM_CNT_CMA_CNT, //!< cma分配的连续内存 + SHARE_MEM_CNT_DPD, //!< 离散内存 +} share_mem_type_t; + msg_tag_t factory_create_irq_sender(obj_handler_t obj, vpage_t vpage); msg_tag_t factory_create_ipc(obj_handler_t obj, vpage_t vpage); msg_tag_t factory_create_thread(obj_handler_t obj, vpage_t vpage); msg_tag_t factory_create_thread_vcpu(obj_handler_t obj, vpage_t vpage); msg_tag_t factory_create_task(obj_handler_t obj, vpage_t vpage); -msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, umword_t size); +msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, share_mem_type_t mem_type, umword_t size); msg_tag_t facotry_create_sema(obj_handler_t obj, vpage_t vpage, int cnt, int max); diff --git a/mkrtos_user/lib/sys/inc/u_share_mem.h b/mkrtos_user/lib/sys/inc/u_share_mem.h index 
6ce285ec4..807631a10 100644 --- a/mkrtos_user/lib/sys/inc/u_share_mem.h +++ b/mkrtos_user/lib/sys/inc/u_share_mem.h @@ -1,5 +1,6 @@ #include "u_types.h" #include "u_prot.h" +#include "u_vmam.h" -msg_tag_t share_mem_map(obj_handler_t obj, uint8_t attrs, umword_t *addr, umword_t *size); +msg_tag_t share_mem_map(obj_handler_t obj, vma_addr_t vaddr, umword_t *addr, umword_t *size); msg_tag_t share_mem_unmap(obj_handler_t obj); diff --git a/mkrtos_user/lib/sys/inc/u_vmam.h b/mkrtos_user/lib/sys/inc/u_vmam.h index 317cb4a81..3e7e23ac2 100644 --- a/mkrtos_user/lib/sys/inc/u_vmam.h +++ b/mkrtos_user/lib/sys/inc/u_vmam.h @@ -30,7 +30,7 @@ typedef union vma_addr }; } vma_addr_t; -static inline vma_addr_t vam_addr_create_raw(umword_t raw) +static inline vma_addr_t vma_addr_create_raw(umword_t raw) { return (vma_addr_t){ .raw = raw, diff --git a/mkrtos_user/lib/sys/src/u_factory.c b/mkrtos_user/lib/sys/src/u_factory.c index c2f230f6f..42ee5a4d0 100644 --- a/mkrtos_user/lib/sys/src/u_factory.c +++ b/mkrtos_user/lib/sys/src/u_factory.c @@ -86,7 +86,7 @@ msg_tag_t factory_create_ipc(obj_handler_t obj, vpage_t vpage) return tag; } -msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, umword_t size) +msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, share_mem_type_t mem_type, umword_t size) { register volatile umword_t r0 asm(ARCH_REG_0); @@ -94,8 +94,8 @@ msg_tag_t facotry_create_share_mem(obj_handler_t obj, vpage_t vpage, umword_t si 0, SHARE_MEM_PROT, vpage.raw, + mem_type, size, - 0, 0); asm __volatile__("" : diff --git a/mkrtos_user/lib/sys/src/u_share_mem.c b/mkrtos_user/lib/sys/src/u_share_mem.c index 93795c4db..d63f14060 100644 --- a/mkrtos_user/lib/sys/src/u_share_mem.c +++ b/mkrtos_user/lib/sys/src/u_share_mem.c @@ -2,19 +2,20 @@ #include "u_prot.h" #include "u_types.h" #include "u_ipc.h" +#include "u_vmam.h" enum share_mem_op { SHARE_MEM_MAP, SHARE_MEM_UNMAP, }; -msg_tag_t share_mem_map(obj_handler_t obj, uint8_t attrs, umword_t *addr, umword_t *size) +msg_tag_t share_mem_map(obj_handler_t obj, vma_addr_t vaddr, umword_t *addr, umword_t *size) { register volatile umword_t r0 asm(ARCH_REG_0); register volatile umword_t r1 asm(ARCH_REG_1); register volatile umword_t r2 asm(ARCH_REG_2); mk_syscall(syscall_prot_create4(SHARE_MEM_MAP, SHARE_MEM_PROT, obj, FALSE).raw, - attrs, + vaddr.raw, 0, 0, 0, diff --git a/mkrtos_user/server/init/src/test/share_mem_test.c b/mkrtos_user/server/init/src/test/share_mem_test.c new file mode 100644 index 000000000..bf6ccabc6 --- /dev/null +++ b/mkrtos_user/server/init/src/test/share_mem_test.c @@ -0,0 +1,57 @@ +#include "u_types.h" +#include "u_prot.h" +#include "u_factory.h" +#include "u_task.h" +#include "u_hd_man.h" +#include "u_share_mem.h" +#include +#include +#include +#include +static void sharea_mem_test(CuTest *cu) +{ + addr_t addr; + umword_t size; + obj_handler_t hd = handler_alloc(); + assert(hd != HANDLER_INVALID); + msg_tag_t tag = facotry_create_share_mem(FACTORY_PROT, + vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, hd), + SHARE_MEM_CNT_BUDDY_CNT, + PAGE_SIZE * 100); + assert(msg_tag_get_prot(tag) >= 0); + tag = share_mem_map(hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0), &addr, &size); + assert(msg_tag_get_prot(tag) >= 0); + memset((void *)addr, 0, size); + share_mem_unmap(hd); + tag = share_mem_map(hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0), &addr, &size); + assert(msg_tag_get_prot(tag) >= 0); + memset((void *)addr, 0, size); + share_mem_unmap(hd); + handler_free_umap(hd); + + hd = handler_alloc(); + 
assert(hd != HANDLER_INVALID); + tag = facotry_create_share_mem(FACTORY_PROT, + vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, hd), + SHARE_MEM_CNT_DPD, + PAGE_SIZE * 100); + assert(msg_tag_get_prot(tag) >= 0); + tag = share_mem_map(hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0), &addr, &size); + assert(msg_tag_get_prot(tag) >= 0); + memset((void *)addr, 0, size); + share_mem_unmap(hd); + tag = share_mem_map(hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0), &addr, &size); + assert(msg_tag_get_prot(tag) >= 0); + memset((void *)addr, 0, size); + share_mem_unmap(hd); + handler_free_umap(hd); +} + +CuSuite *sharem_mem_test_suite(void) +{ + CuSuite *suite = CuSuiteNew(); + + SUITE_ADD_TEST(suite, sharea_mem_test); + + return suite; +} diff --git a/mkrtos_user/server/init/src/test/test.h b/mkrtos_user/server/init/src/test/test.h index 1fa5aaa67..9736e2d90 100644 --- a/mkrtos_user/server/init/src/test/test.h +++ b/mkrtos_user/server/init/src/test/test.h @@ -14,6 +14,7 @@ CuSuite *map_test_suite(void); CuSuite *pthread_base_test_suite(void); CuSuite *thread_base_test_suite(void); CuSuite *ipc_test_suite(void); +CuSuite *sharem_mem_test_suite(void); void test_main(void); void mm_test(void); diff --git a/mkrtos_user/server/init/src/test/test_main.c b/mkrtos_user/server/init/src/test/test_main.c index 7d33b3a9a..ddabc144e 100644 --- a/mkrtos_user/server/init/src/test/test_main.c +++ b/mkrtos_user/server/init/src/test/test_main.c @@ -11,11 +11,12 @@ static void RunAllTests(void) CuSuiteAddSuite(suite, ulog_test_suite()); CuSuiteAddSuite(suite, printf_test_suite()); CuSuiteAddSuite(suite, vmm_test_suite()); - CuSuiteAddSuite(suite, malloc_test_suite()); + CuSuiteAddSuite(suite, sharem_mem_test_suite()); CuSuiteAddSuite(suite, map_test_suite()); CuSuiteAddSuite(suite, thread_base_test_suite()); - CuSuiteAddSuite(suite, sema_test_suite()); + + CuSuiteAddSuite(suite, malloc_test_suite()); CuSuiteAddSuite(suite, pthread_base_test_suite()); CuSuiteAddSuite(suite, pthread_press_test_suite()); CuSuiteAddSuite(suite, pthread_lock_test_suite()); diff --git a/mkrtos_user/server/net/src/ethernetif.c b/mkrtos_user/server/net/src/ethernetif.c index e8e0ea90c..b07b3afac 100644 --- a/mkrtos_user/server/net/src/ethernetif.c +++ b/mkrtos_user/server/net/src/ethernetif.c @@ -44,9 +44,12 @@ static err_t low_level_init(struct netif *netif) msg_tag_t tag; send_shm_hd = handler_alloc(); assert(send_shm_hd != HANDLER_INVALID); - tag = facotry_create_share_mem(FACTORY_PROT, vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, send_shm_hd), 2048); + tag = facotry_create_share_mem(FACTORY_PROT, + vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, send_shm_hd), SHARE_MEM_CNT_BUDDY_CNT, + 2048); assert(msg_tag_get_prot(tag) >= 0); - tag = share_mem_map(send_shm_hd, 3, &send_shm_addr, &send_shm_size); + tag = share_mem_map(send_shm_hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0), + &send_shm_addr, &send_shm_size); assert(msg_tag_get_prot(tag) >= 0); return ERR_OK; diff --git a/mkrtos_user/server/net/src/main.c b/mkrtos_user/server/net/src/main.c index e99d147a7..1c8ccef65 100644 --- a/mkrtos_user/server/net/src/main.c +++ b/mkrtos_user/server/net/src/main.c @@ -38,9 +38,10 @@ int main(int args, char *argv[]) obj_handler_t shm_hd = handler_alloc(); assert(shm_hd != HANDLER_INVALID); - tag = facotry_create_share_mem(FACTORY_PROT, vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, shm_hd), 2048); + tag = facotry_create_share_mem(FACTORY_PROT, vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, shm_hd), + SHARE_MEM_CNT_BUDDY_CNT, 2048); assert(msg_tag_get_prot(tag) >= 0); - tag = 
share_mem_map(shm_hd, 3, &addr, &size); + tag = share_mem_map(shm_hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0), &addr, &size); assert(msg_tag_get_prot(tag) >= 0); while (1) diff --git a/mkrtos_user/server/test/src/share_mem_test.c b/mkrtos_user/server/test/src/share_mem_test.c deleted file mode 100644 index a7cacc12e..000000000 --- a/mkrtos_user/server/test/src/share_mem_test.c +++ /dev/null @@ -1,24 +0,0 @@ -#include "u_types.h" -#include "u_prot.h" -#include "u_factory.h" -#include "u_task.h" -#include "u_hd_man.h" -#include "u_share_mem.h" -#include -#include -#include -AUTO_CALL(102) -void sharea_mem_test(void) -{ - addr_t addr; - umword_t size; - obj_handler_t hd = handler_alloc(); - assert(hd != HANDLER_INVALID); - msg_tag_t tag = facotry_create_share_mem(FACTORY_PROT, vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, hd), 1024); - assert(msg_tag_get_prot(tag) >= 0); - tag = share_mem_map(hd, 3, &addr, &size); - assert(msg_tag_get_prot(tag) >= 0); - memset((void *)addr, 0, size); - // share_mem_unmap(hd); - handler_free_umap(hd); -}
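For readers skimming the diff, the sketch below condenses the user-space flow exercised by the new mkrtos_user/server/init/src/test/share_mem_test.c: create a share-mem object (facotry_create_share_mem now takes a share_mem_type_t), map it with a vma_addr_t instead of the old raw attribute byte, touch the memory, then unmap and drop the handle. It is an illustration only, not part of the patch; every identifier is taken from the patch itself, the standard assert/string headers are assumed (the test's angle-bracket includes are not visible in the diff), and the size and type values are arbitrary.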
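#include "u_types.h"
#include "u_prot.h"
#include "u_factory.h"
#include "u_hd_man.h"
#include "u_share_mem.h"
#include <assert.h>   /* assumed; mirrors the stripped includes in share_mem_test.c */
#include <string.h>

/* Minimal usage sketch, condensed from the share_mem_test.c added by this patch. */
static void share_mem_usage_sketch(void)
{
    addr_t addr;
    umword_t size;

    obj_handler_t hd = handler_alloc();
    assert(hd != HANDLER_INVALID);

    /* Create the share-mem kernel object.  The factory call now carries a
     * share_mem_type_t: buddy-contiguous, CMA (not yet implemented in this
     * patch), or discrete page-by-page memory. */
    msg_tag_t tag = facotry_create_share_mem(FACTORY_PROT,
                                             vpage_create_raw3(KOBJ_ALL_RIGHTS, 0, hd),
                                             SHARE_MEM_CNT_DPD, /* discrete pages */
                                             PAGE_SIZE * 4);    /* arbitrary size */
    assert(msg_tag_get_prot(tag) >= 0);

    /* Map it into the current task.  The old raw attribute byte is replaced by
     * a vma_addr_t; passing address 0 lets the kernel pick the virtual range,
     * which is returned in addr together with the object's size. */
    tag = share_mem_map(hd, vma_addr_create(VPAGE_PROT_RW, VMA_ADDR_RESV, 0),
                        &addr, &size);
    assert(msg_tag_get_prot(tag) >= 0);

    memset((void *)addr, 0, size); /* the mapping is usable immediately */

    /* Unmapping only detaches this task; the physical pages stay with the
     * kernel object until the object itself is released. */
    share_mem_unmap(hd);
    handler_free_umap(hd);
}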
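As a design note on the kernel side of this flow (MMU builds): share_mem_map() reserves the virtual range with VMA_ADDR_RESV and then pre-faults every page through task_vma_page_fault() with an explicit physical address, so the object's buddy- or page-allocated memory is mapped eagerly rather than on demand; share_mem_unmap() calls task_vma_free_pmem(..., FALSE), which tears down the page mappings without freeing the physical memory, and the pages are released only in share_mem_release_stage2() once the object's task list is empty.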