[vmm] move most users of arch_mmu_query directly to vaddr_to_paddr()
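arch_mmu_query() forces every caller to declare a temporary, pass an out pointer, and check a status code even when all it wants is the physical address. vaddr_to_paddr() wraps that sequence and returns 0 for an unmapped address, so most call sites collapse to a single call plus an optional zero check. As part of the same cleanup, kvaddr_to_paddr() is renamed to vaddr_to_paddr() in kernel/vm.h.

A sketch of the conversion pattern, mirroring the virtio_alloc_ring() hunk below (the names and error style are taken from that hunk, not a new API):

    /* before: out-parameter plus status code */
    paddr_t pa;
    err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
    if (err < 0) {
        return ERR_NO_MEMORY;
    }

    /* after: a return of 0 means the address is not mapped */
    pa = vaddr_to_paddr(vptr);
    if (pa == 0) {
        return ERR_NO_MEMORY;
    }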
@@ -110,7 +110,7 @@ static int do_boot(lkb_t *lkb, size_t len, const char **result)
         *result = "not enough memory";
         return -1;
     }
-    arch_mmu_query((vaddr_t)buf, &buf_phys, NULL);
+    buf_phys = vaddr_to_paddr(buf);
     LTRACEF("iobuffer %p (phys 0x%lx)\n", buf, buf_phys);
 
     if (lkb_read(lkb, buf, len)) {
@@ -367,7 +367,7 @@ int lkb_handle_command(lkb_t *lkb, const char *cmd, const char *arg, size_t len,
         }
 
         /* translate to physical address */
-        paddr_t pa = kvaddr_to_paddr(buf);
+        paddr_t pa = vaddr_to_paddr(buf);
         if (pa == 0) {
             *result = "error allocating buffer";
             free(buf);
@@ -33,7 +33,7 @@
 #include <lib/cbuf.h>
 #include <app/lkboot.h>
 #include <arch/arm/dcc.h>
-#include <arch/mmu.h>
+#include <kernel/vm.h>
 #include <kernel/mutex.h>
 
 #include "pdcc.h"
@@ -239,20 +239,20 @@ void lkboot_dcc_init(void)
 
     buffer_desc.version = PDCC_VERSION;
 
-    err = arch_mmu_query((vaddr_t)htod_buffer, &pa, NULL);
-    DEBUG_ASSERT(err == NO_ERROR);
+    pa = vaddr_to_paddr(htod_buffer);
+    DEBUG_ASSERT(pa);
 
     buffer_desc.htod_buffer_phys = pa;
     buffer_desc.htod_buffer_len = DCC_BUFLEN;
 
-    err = arch_mmu_query((vaddr_t)dtoh_buffer, &pa, NULL);
-    DEBUG_ASSERT(err == NO_ERROR);
+    pa = vaddr_to_paddr(dtoh_buffer);
+    DEBUG_ASSERT(pa);
 
     buffer_desc.dtoh_buffer_phys = pa;
     buffer_desc.dtoh_buffer_len = DCC_BUFLEN;
 
-    err = arch_mmu_query((vaddr_t)&buffer_desc, &buffer_desc_phys, NULL);
-    DEBUG_ASSERT(err == NO_ERROR);
+    buffer_desc_phys = vaddr_to_paddr(&buffer_desc);
+    DEBUG_ASSERT(buffer_desc_phys);
 
     arch_clean_cache_range((vaddr_t)&buffer_desc, sizeof(buffer_desc));
 }
@@ -201,7 +201,7 @@ usage:
     }
 
     paddr_t pa;
-    arch_mmu_query((vaddr_t)ptr, &pa, 0);
+    pa = vaddr_to_paddr(ptr);
     printf("physical address 0x%lx\n", pa);
 #else
     /* allocate from the heap */
@@ -136,7 +136,7 @@ static void zynq_common_target_init(uint level)
         /* we have a fpga image */
 
         /* lookup the physical address of the bitfile */
-        paddr_t pa = kvaddr_to_paddr((void *)fpga_ptr);
+        paddr_t pa = vaddr_to_paddr((void *)fpga_ptr);
         if (pa != 0) {
             /* program the fpga with it*/
             printf("loading fpga image at %p (phys 0x%lx), len %zx\n", fpga_ptr, pa, fpga_len);
@@ -358,7 +358,7 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
             &arm_chain_load, loader_pa, loader_pa_section);
 
     /* using large pages, map around the target location */
-    arch_mmu_map(loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
+    arch_mmu_map(NULL, loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
 #else
     /* for non vm case, just branch directly into it */
     entry_pa = (paddr_t)entry;
@@ -93,7 +93,6 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread)
|
||||
#endif
|
||||
|
||||
arm_context_switch(&oldthread->arch.sp, newthread->arch.sp);
|
||||
|
||||
}
|
||||
|
||||
void arch_dump_thread(thread_t *t)
|
||||
|
||||
@@ -112,14 +112,14 @@ status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features)
 
     bdev->blk_req = memalign(sizeof(struct virtio_blk_req), sizeof(struct virtio_blk_req));
 #if WITH_KERNEL_VM
-    arch_mmu_query((vaddr_t)bdev->blk_req, &bdev->blk_req_phys, NULL);
+    bdev->blk_req_phys = vaddr_to_paddr(bdev->blk_req);
 #else
     bdev->blk_req_phys = (uint64_t)(uintptr_t)bdev->blk_req;
 #endif
     LTRACEF("blk_req structure at %p (0x%lx phys)\n", bdev->blk_req, bdev->blk_req_phys);
 
 #if WITH_KERNEL_VM
-    arch_mmu_query((vaddr_t)&bdev->blk_response, &bdev->blk_response_phys, NULL);
+    bdev->blk_response_phys = vaddr_to_paddr(&bdev->blk_response);
 #else
     bdev->blk_response_phys = (uint64_t)(uintptr_t)&bdev->blk_response;
 #endif
@@ -237,7 +237,7 @@ ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offs
     desc = virtio_desc_index_to_desc(dev, 0, desc->next);
 #if WITH_KERNEL_VM
     /* translate the first buffer */
-    arch_mmu_query(va, &pa, NULL);
+    pa = vaddr_to_paddr((void *)va);
     desc->addr = (uint64_t)pa;
     /* desc->len is filled in below */
 #else
@@ -259,7 +259,7 @@ ssize_t virtio_block_read_write(struct virtio_device *dev, void *buf, off_t offs
 
         /* translate the next page in the buffer */
         va = PAGE_ALIGN(va + 1);
-        arch_mmu_query(va, &pa, NULL);
+        pa = vaddr_to_paddr((void *)va);
         LTRACEF("va now 0x%lx, pa 0x%lx, next_pa 0x%lx, remaining len %zu\n", va, pa, next_pa, len);
 
         /* is the new translated physical address contiguous to the last one? */
@@ -228,7 +228,7 @@ static status_t attach_backing(struct virtio_gpu_dev *gdev, uint32_t resource_id
     req.req.nr_entries = 1;
 
     paddr_t pa;
-    arch_mmu_query((vaddr_t)ptr, &pa, NULL);
+    pa = vaddr_to_paddr(ptr);
     req.mem.addr = pa;
     req.mem.length = buf_len;
 
@@ -445,7 +445,7 @@ status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features)
     /* allocate memory for a gpu request */
 #if WITH_KERNEL_VM
     gdev->gpu_request = pmm_alloc_kpage();
-    gdev->gpu_request_phys = kvaddr_to_paddr(gdev->gpu_request);
+    gdev->gpu_request_phys = vaddr_to_paddr(gdev->gpu_request);
 #else
     gdev->gpu_request = malloc(sizeof(struct virtio_gpu_resp_display_info)); // XXX get size better
     gdev->gpu_request_phys = (paddr_t)gdev->gpu_request;
@@ -349,8 +349,8 @@ status_t virtio_alloc_ring(struct virtio_device *dev, uint index, uint16_t len)
 
     /* compute the physical address */
     paddr_t pa;
-    err = arch_mmu_query((vaddr_t)vptr, &pa, NULL);
-    if (err < 0) {
+    pa = vaddr_to_paddr(vptr);
+    if (pa == 0) {
         return ERR_NO_MEMORY;
     }
 
@@ -181,7 +181,7 @@ size_t pmm_free_kpages(void *ptr, uint count);
 void *paddr_to_kvaddr(paddr_t pa);
 
 /* virtual to physical */
-paddr_t kvaddr_to_paddr(void *va);
+paddr_t vaddr_to_paddr(void *va);
 
 /* virtual allocator */
 typedef struct vmm_aspace {
@@ -272,7 +272,7 @@ size_t pmm_free_kpages(void *_ptr, uint count)
     list_initialize(&list);
 
     while (count > 0) {
-        vm_page_t *p = address_to_page(kvaddr_to_paddr(ptr));
+        vm_page_t *p = address_to_page(vaddr_to_paddr(ptr));
         if (p) {
             list_add_tail(&list, &p->node);
         }
@@ -37,7 +37,6 @@
 
 #if WITH_KERNEL_VM
 #include <kernel/vm.h>
-#include <arch/mmu.h>
 #endif
 
 static int cmd_display_mem(int argc, const cmd_args *argv);
@@ -123,7 +122,7 @@ static int cmd_display_mem(int argc, const cmd_args *argv)
 
 #if WITH_KERNEL_VM
     /* preflight the start address to see if it's mapped */
-    if (arch_mmu_query((vaddr_t)address, NULL, NULL) < 0) {
+    if (vaddr_to_paddr((void *)address) == 0) {
         printf("ERROR: address 0x%lx is unmapped\n", address);
         return -1;
     }
@@ -110,7 +110,7 @@ void pktbuf_add_buffer(pktbuf_t *p, u8 *buf, u32 len, uint32_t header_sz, uint32
      * stick with the address as presented to us.
      */
 #if WITH_KERNEL_VM
-    p->phys_base = kvaddr_to_paddr(buf) | (uintptr_t) buf % PAGE_SIZE;
+    p->phys_base = vaddr_to_paddr(buf) | (uintptr_t) buf % PAGE_SIZE;
 #else
     p->phys_base = (uintptr_t) buf;
 #endif
@@ -474,7 +474,7 @@ status_t gem_init(uintptr_t gem_base)
             sizeof(*gem.descs), &descs_vaddr, 0, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE)) < 0) {
         return ret;
     }
-    descs_paddr = kvaddr_to_paddr((void *)descs_vaddr);
+    descs_paddr = vaddr_to_paddr((void *)descs_vaddr);
 
     /* tx/rx descriptor tables and memory mapped registers */
     gem.descs = (void *)descs_vaddr;