9 Commits

Author SHA1 Message Date
Travis Geiselbrecht
f4d32db4a2 WIP fat test tweaks, do-qemuarm tweaks 2022-09-06 23:19:30 -07:00
Travis Geiselbrecht
a2572e586f WIP fat extend file, write zeros to new clusters 2022-09-06 23:19:30 -07:00
Travis Geiselbrecht
db19c46914 WIP more work on fat 2022-09-05 21:54:36 -07:00
Travis Geiselbrecht
7a9691e826 [lib][bcache] add RAII utility class to hold a cache block 2022-09-05 21:54:36 -07:00
Travis Geiselbrecht
bb635f3a3b WIP fat resize 2022-09-05 21:54:36 -07:00
Travis Geiselbrecht
229c6557b3 [fs][fat] extend test cases to include simple file creation 2022-09-05 21:54:36 -07:00
Travis Geiselbrecht
afa659732e [fs][fat] Initial implementation of file create
Limitations:
Only supports simple 8.3 file names
Cannot create with size > 0
Timestamp is bogus
2022-09-05 21:54:36 -07:00
Travis Geiselbrecht
1c58670d46 [lib][fs][shell] fix bug in mkfile shell command 2022-09-05 21:54:36 -07:00
Travis Geiselbrecht
0ae7244eff [dev][virtio] add a partition scan once a block device is found
Move the irq registration of the virtio drivers into their init hooks so
the driver is fully working at partition scan time.

TODO: think of a more integrated solution for this. Triggering from the
bio layer, for example.
2022-09-05 19:45:37 -07:00
23 changed files with 1047 additions and 203 deletions

View File

@@ -7,7 +7,7 @@ MODULE_SRCS += \
MODULE_DEPS += \
dev/virtio \
lib/bio
lib/bio \
lib/partition
include make/module.mk

View File

@@ -18,7 +18,9 @@
#include <kernel/event.h>
#include <kernel/mutex.h>
#include <lib/bio.h>
#include <lib/partition.h>
#include <inttypes.h>
#include <platform/interrupts.h>
#if WITH_KERNEL_VM
#include <kernel/vm.h>
@@ -187,6 +189,7 @@ status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features) {
/* set our irq handler */
dev->irq_driver_callback = &virtio_block_irq_driver_callback;
unmask_interrupt(dev->irq);
/* set DRIVER_OK */
virtio_status_driver_ok(dev);
@@ -229,6 +232,9 @@ status_t virtio_block_init(struct virtio_device *dev, uint32_t host_features) {
printf("\twrite zeroes: max sectors %u max sequence %u may unmap %u\n", config->max_write_zeroes_sectors, config->max_write_zeroes_seq, config->write_zeros_may_unmap);
}
/* tell the partition layer to scan and find any subdevices */
partition_publish(buf, 0);
return NO_ERROR;
}

View File

@@ -19,6 +19,7 @@
#include <kernel/event.h>
#include <kernel/mutex.h>
#include <dev/display.h>
#include <platform/interrupts.h>
#if WITH_KERNEL_VM
#include <kernel/vm.h>
@@ -454,6 +455,7 @@ status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features) {
/* set our irq handler */
dev->irq_driver_callback = &virtio_gpu_irq_driver_callback;
dev->config_change_callback = &virtio_gpu_config_change_callback;
unmask_interrupt(dev->irq);
/* set DRIVER_OK */
virtio_status_driver_ok(dev);

View File

@@ -20,6 +20,7 @@
#include <kernel/spinlock.h>
#include <lib/pktbuf.h>
#include <lib/minip.h>
#include <platform/interrupts.h>
#define LOCAL_TRACE 0
@@ -150,6 +151,7 @@ status_t virtio_net_init(struct virtio_device *dev, uint32_t host_features) {
/* set our irq handler */
dev->irq_driver_callback = &virtio_net_irq_driver_callback;
unmask_interrupt(dev->irq);
/* set DRIVER_OK */
virtio_status_driver_ok(dev);

View File

@@ -160,9 +160,6 @@ int virtio_mmio_detect(void *ptr, uint count, const uint irqs[], size_t stride)
if (err >= 0) {
// good device
dev->valid = true;
if (dev->irq_driver_callback)
unmask_interrupt(dev->irq);
}
}
#endif // WITH_DEV_VIRTIO_BLOCK
@@ -177,9 +174,6 @@ int virtio_mmio_detect(void *ptr, uint count, const uint irqs[], size_t stride)
if (err >= 0) {
// good device
dev->valid = true;
if (dev->irq_driver_callback)
unmask_interrupt(dev->irq);
}
}
#endif // WITH_DEV_VIRTIO_NET
@@ -195,9 +189,6 @@ int virtio_mmio_detect(void *ptr, uint count, const uint irqs[], size_t stride)
// good device
dev->valid = true;
if (dev->irq_driver_callback)
unmask_interrupt(dev->irq);
virtio_gpu_start(dev);
}
}

View File

@@ -148,6 +148,9 @@ static struct bcache_block *alloc_block(struct bcache *cache) {
LTRACEF("looking at %p, num %u\n", block, block->blocknum);
if (block->ref_count == 0) {
if (block->is_dirty) {
/* if the oldest block in the list that's available is dirty, write
* it back first.
*/
err = flush_block(cache, block);
if (err)
return NULL;

View File

@@ -0,0 +1,97 @@
/*
* Copyright (c) 2022 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#include <lib/bcache.h>
#include <lk/cpp.h>
#include <lk/err.h>
// C++ helper class that holds a reference to a block in the block cache,
// primarily for RAII purposes.
class bcache_block_ref {
public:
bcache_block_ref(bcache_t cache) : cache_(cache) {}
~bcache_block_ref() {
close();
}
// move constructor
bcache_block_ref(bcache_block_ref &&other) : bcache_block_ref(other.cache_) {
// TODO: replace with equivalent to std::swap when/if implemented
ptr_ = other.ptr_;
block_num_ = other.block_num_;
other.cache_ = {};
other.ptr_ = {};
other.block_num_ = {};
}
// move assignment operator
bcache_block_ref &operator=(bcache_block_ref &&other) {
cache_ = other.cache_;
ptr_ = other.ptr_;
block_num_ = other.block_num_;
other.cache_ = {};
other.ptr_ = {};
other.block_num_ = {};
return *this;
}
DISALLOW_COPY_AND_ASSIGN_ALLOW_MOVE(bcache_block_ref);
// close out the current block
int close() {
int err = 0;
if (ptr_) {
err = bcache_put_block(cache_, block_num_);
ptr_ = nullptr;
}
return err;
}
// get a new block
int get_block(uint block) {
// if it's already open just return it
if (ptr_ && block_num_ == block) {
return NO_ERROR;
}
// close out the existing block
int err = close();
if (err != 0) {
return err;
}
// open the new one
void *newptr;
err = bcache_get_block(cache_, &newptr, block);
if (err < 0) {
return err;
}
ptr_ = newptr;
block_num_ = block;
return err;
}
void mark_dirty() {
if (ptr_) {
bcache_mark_block_dirty(cache_, block_num_);
}
}
bool is_valid() const { return ptr_; }
// return the pointer to the block
const void *ptr() const { return ptr_; }
void *ptr() { return ptr_; }
private:
bcache_t cache_ {};
void *ptr_ {};
uint block_num_;
};
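A minimal usage sketch of the RAII wrapper above, assuming the LK headers shown in this diff; the helper function name read_first_byte is illustrative only. The point is that the block is returned to the cache on every exit path via ~bcache_block_ref().

#include <lib/bcache.h>
#include <lib/bcache/bcache_block_ref.h>
#include <lk/err.h>
#include <stdint.h>

// read one byte out of a cached block without having to remember to call
// bcache_put_block() on every return path
static int read_first_byte(bcache_t cache, uint block, uint8_t *out) {
    bcache_block_ref ref(cache);     // holds no block yet

    int err = ref.get_block(block);  // bcache_get_block() under the hood
    if (err < 0) {
        return err;                  // nothing to release, the ref is empty
    }

    *out = *(const uint8_t *)ref.ptr();

    // if the block had been modified, ref.mark_dirty() would flag it here

    return NO_ERROR;
    // the destructor calls close(), which puts the block back in the cache
}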

View File

@@ -544,7 +544,7 @@ void bio_dump_devices(void) {
entry->name, entry->total_size, entry->block_size, entry->ref);
if (!entry->geometry_count || !entry->geometry) {
printf(" (no erase geometry)\n");
printf(" (no erase geometry)");
} else {
for (size_t i = 0; i < entry->geometry_count; ++i) {
const bio_erase_geometry_info_t *geo = entry->geometry + i;

View File

@@ -11,11 +11,13 @@
#include <lk/cpp.h>
#include <lk/err.h>
#include <lk/trace.h>
#include <ctype.h>
#include <endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <lib/bcache/bcache_block_ref.h>
#include "fat_fs.h"
#include "fat_priv.h"
@@ -23,6 +25,8 @@
#define LOCAL_TRACE FAT_GLOBAL_TRACE(0)
fat_dir::fat_dir(fat_fs *f) : fat_file(f) {}
fat_dir::~fat_dir() = default;
// structure that represents an open dir handle. holds the offset into the directory
@@ -37,13 +41,11 @@ struct fat_dir_cookie {
static const uint32_t index_eod = 0xffffffff;
};
namespace {
// walk one entry into the dir, starting at byte offset into the directory block iterator.
// both dbi and offset will be modified during the call.
// fills out the entry and returns a pointer into the passed-in buffer via out_filename.
// NOTE: *must* pass at least a MAX_FILE_NAME_LEN byte char pointer in the filename_buffer slot.
status_t fat_find_next_entry(fat_fs *fat, file_block_iterator &dbi, uint32_t &offset, dir_entry *entry,
static status_t fat_find_next_entry(fat_fs *fat, file_block_iterator &dbi, uint32_t &offset, dir_entry *entry,
char filename_buffer[MAX_FILE_NAME_LEN], char **out_filename) {
DEBUG_ASSERT(entry && filename_buffer && out_filename);
@@ -69,7 +71,7 @@ status_t fat_find_next_entry(fat_fs *fat, file_block_iterator &dbi, uint32_t &of
// walk within a sector
while (offset < fat->info().bytes_per_sector) {
LTRACEF_LEVEL(2, "looking at offset %u\n", offset);
LTRACEF_LEVEL(2, "looking at offset %#x\n", offset);
const uint8_t *ent = dbi.get_bcache_ptr(offset);
if (ent[0] == 0) { // no more entries
// we're completely done
@@ -206,10 +208,11 @@ status_t fat_find_next_entry(fat_fs *fat, file_block_iterator &dbi, uint32_t &of
return ERR_NOT_FOUND;
}
status_t fat_find_file_in_dir(fat_fs *fat, uint32_t starting_cluster, const char *name, dir_entry *entry, uint32_t *found_offset) {
static status_t fat_find_file_in_dir(fat_fs *fat, uint32_t starting_cluster, const char *name, dir_entry *entry, uint32_t *found_offset) {
LTRACEF("start_cluster %u, name '%s', out entry %p\n", starting_cluster, name, entry);
DEBUG_ASSERT(fat->lock.is_held());
DEBUG_ASSERT(entry);
// cache the length of the string we're matching against
const size_t namelen = strlen(name);
@@ -237,15 +240,15 @@ status_t fat_find_file_in_dir(fat_fs *fat, uint32_t starting_cluster, const char
// see if we've matched an entry
if (filenamelen == namelen && !strnicmp(name, filename, filenamelen)) {
// we have, return with a good status
*found_offset = offset;
if (found_offset) {
*found_offset = offset;
}
return NO_ERROR;
}
}
}
} // namespace
status_t fat_walk(fat_fs *fat, const char *path, dir_entry *out_entry, dir_entry_location *loc) {
status_t fat_dir_walk(fat_fs *fat, const char *path, dir_entry *out_entry, dir_entry_location *loc) {
LTRACEF("path %s\n", path);
DEBUG_ASSERT(fat->lock.is_held());
@@ -321,14 +324,290 @@ status_t fat_walk(fat_fs *fat, const char *path, dir_entry *out_entry, dir_entry
}
} else {
// we got a hit at the terminal entry of the path, pass it out to the caller as a success
*out_entry = entry;
loc->starting_dir_cluster = dir_start_cluster;
loc->dir_offset = found_offset;
if (out_entry) {
*out_entry = entry;
}
if (loc) {
loc->starting_dir_cluster = dir_start_cluster;
loc->dir_offset = found_offset;
}
return NO_ERROR;
}
}
}
// splits a path into the leading path and the last element
// if the leading part is zero length, returns a single "/" element
// modifies the string passed in
// TODO: write unit test
static void split_path(char *path, const char **leading_path, const char **last_element) {
char *last_slash = strrchr(path, '/');
if (last_slash) {
*last_slash = 0;
if (path[0] != 0) {
*leading_path = path;
} else {
*leading_path = "/";
}
*last_element = last_slash + 1;
} else {
*leading_path = "/";
*last_element = path;
}
}
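A quick sketch of the splits the comment above promises, and the shape of the unit test the TODO asks for; it assumes split_path() were made reachable from a test (it is currently file-static), and the sample paths are illustrative.

#include <assert.h>
#include <string.h>

static void split_path_examples() {
    const char *leading, *last;

    char a[] = "/dir.a/newfile";
    split_path(a, &leading, &last);   // trailing element peeled off
    assert(!strcmp(leading, "/dir.a") && !strcmp(last, "newfile"));

    char b[] = "/newfile";
    split_path(b, &leading, &last);   // zero-length leading part collapses to "/"
    assert(!strcmp(leading, "/") && !strcmp(last, "newfile"));

    char c[] = "newfile";
    split_path(c, &leading, &last);   // no slash at all behaves the same way
    assert(!strcmp(leading, "/") && !strcmp(last, "newfile"));
}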
// construct a short file name from the incoming name
// the sfn is padded out with spaces the same way a real FAT entry is
// TODO: write unit test
static status_t name_to_short_file_name(char sfn[8 + 3 + 1], const char *name) {
// zero length inputs don't fly
if (name[0] == 0) {
return ERR_INVALID_ARGS;
}
// start off with a spaced out sfn
memset(sfn, ' ', 8 + 3);
sfn[8 + 3] = 0;
size_t input_pos = 0;
size_t output_pos = 0;
// pick out the 8 character name part
for (auto i = 0; i < 8; i++) {
char c = name[input_pos];
if (c == 0) {
break;
} else if (c == '.') {
output_pos = 8;
break;
} else {
sfn[output_pos++] = toupper(c);
input_pos++;
}
}
// at this point input pos had better be looking at a . or a null
if (name[input_pos] == 0) {
return NO_ERROR;
}
if (name[input_pos] != '.') {
return ERR_INVALID_ARGS;
}
input_pos++;
for (auto i = 0; i < 3; i++) {
char c = name[input_pos];
if (c == 0) {
break;
} else if (c == '.') {
// can only see '.' once
return ERR_INVALID_ARGS;
} else {
sfn[output_pos++] = toupper(c);
input_pos++;
}
}
// at this point we should be looking at the end of the input string
if (name[input_pos] != 0) {
return ERR_INVALID_ARGS;
}
return NO_ERROR;
}
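In the same spirit, a hedged sketch of what the 8.3 conversion above produces; the strings follow directly from the upcasing and space padding, and again the static helper would need to be visible to a test for this to run.

#include <assert.h>
#include <string.h>
#include <lk/err.h>

static void sfn_examples() {
    char sfn[8 + 3 + 1];

    // name and extension are upcased and space padded to 8 and 3 columns
    assert(name_to_short_file_name(sfn, "newfile.txt") == NO_ERROR);
    assert(!strcmp(sfn, "NEWFILE TXT"));

    // no extension leaves the last three columns as spaces
    assert(name_to_short_file_name(sfn, "hello") == NO_ERROR);
    assert(!strcmp(sfn, "HELLO      "));

    // anything that doesn't trivially fit 8.3 is rejected for now
    assert(name_to_short_file_name(sfn, "a_very_long_filename.txt") == ERR_INVALID_ARGS);
    assert(name_to_short_file_name(sfn, "two.dots.txt") == ERR_INVALID_ARGS);
}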
status_t fat_dir_allocate(fat_fs *fat, const char *path, const fat_attribute attr, const uint32_t starting_cluster, const uint32_t size, dir_entry_location *loc) {
LTRACEF("path %s\n", path);
DEBUG_ASSERT(fat->lock.is_held());
// trim the last segment off the path, splitting it into the leading path and the last segment
char local_path[FS_MAX_FILE_LEN + 1];
strlcpy(local_path, path, FS_MAX_FILE_LEN);
const char *leading_path;
const char *last_element;
split_path(local_path, &leading_path, &last_element);
DEBUG_ASSERT(leading_path && last_element);
LTRACEF("path is now split into %s and %s\n", leading_path, last_element);
// find the starting directory cluster of the container directory
// 0 may mean root dir on fat12/16
uint32_t starting_dir_cluster;
if (strcmp(leading_path, "/") == 0) {
// root dir is a special case since we know where to start
if (fat->info().root_cluster) {
starting_dir_cluster = fat->info().root_cluster;
} else {
// fat 12/16 has a linear root dir, cluster 0 is a special case to fat_find_file_in_dir below
starting_dir_cluster = 0;
}
} else {
// walk to find the containing directory
dir_entry entry;
dir_entry_location dir_loc;
status_t err = fat_dir_walk(fat, local_path, &entry, &dir_loc);
if (err < 0) {
return err;
}
// verify it's a directory
if (entry.attributes != fat_attribute::directory) {
return ERR_BAD_PATH;
}
LTRACEF("found containing dir at %u:%u: starting cluster %u\n", dir_loc.starting_dir_cluster, dir_loc.dir_offset, entry.start_cluster);
starting_dir_cluster = entry.start_cluster;
if (starting_dir_cluster < 2 || starting_dir_cluster >= fat->info().total_clusters) {
TRACEF("directory entry contains out of bounds cluster %u\n", starting_dir_cluster);
return ERR_BAD_STATE;
}
}
LTRACEF("starting dir cluster of parent dir %u\n", starting_dir_cluster);
// verify the file doesn't already exist
dir_entry entry;
status_t err = fat_find_file_in_dir(fat, starting_dir_cluster, last_element, &entry, nullptr);
if (err >= 0) {
// we found it, can't create a new file in its place
return ERR_ALREADY_EXISTS;
}
// TODO: handle long file names
char sfn[8 + 3 + 1];
err = name_to_short_file_name(sfn, last_element);
if (err < 0) {
// if we couldn't convert to a SFN trivially, abort
return err;
}
LTRACEF("short file name '%s'\n", sfn);
// now we have a starting cluster for the containing directory and proof that it doesn't already exist.
// start walking to find a free slot
file_block_iterator dbi(fat, starting_dir_cluster);
err = dbi.next_sectors(0);
if (err < 0) {
return err;
}
uint32_t dir_offset = 0;
uint32_t sector_offset = 0;
for (;;) {
if (LOCAL_TRACE >= 2) {
LTRACEF("dir sector:\n");
hexdump8_ex(dbi.get_bcache_ptr(0), fat->info().bytes_per_sector, 0);
}
// walk within a sector
while (sector_offset < fat->info().bytes_per_sector) {
LTRACEF_LEVEL(2, "looking at offset %#x\n", sector_offset);
uint8_t *ent = dbi.get_bcache_ptr(sector_offset);
if (ent[0] == 0xe5 || ent[0] == 0) {
// deleted or last entry in the list
LTRACEF("found usable at offset %#x\n", sector_offset);
if (LOCAL_TRACE > 1) hexdump8_ex(ent, DIR_ENTRY_LENGTH, 0);
// fill in an entry here
memcpy(&ent[0], sfn, 11); // name
ent[11] = (uint8_t)attr; // attribute
ent[12] = 0; // reserved
ent[13] = 0; // creation time tenth of second
fat_write16(ent, 14, 0); // creation time seconds / 2
fat_write16(ent, 16, 0); // creation date
fat_write16(ent, 18, 0); // last accessed date
fat_write16(ent, 20, starting_cluster >> 16); // fat cluster high
fat_write16(ent, 22, 0); // modification time
fat_write16(ent, 24, 0); // modification date
fat_write16(ent, 26, starting_cluster); // fat cluster low
fat_write32(ent, 28, size); // file size
LTRACEF_LEVEL(2, "filled in entry\n");
if (LOCAL_TRACE > 1) hexdump8_ex(ent, DIR_ENTRY_LENGTH, 0);
// flush the data and exit
dbi.mark_bcache_dirty();
// flush it
bcache_flush(fat->bcache());
// fill in our location data and exit
if (loc) {
loc->starting_dir_cluster = starting_dir_cluster;
loc->dir_offset = dir_offset;
}
return NO_ERROR;
}
dir_offset += DIR_ENTRY_LENGTH;
sector_offset += DIR_ENTRY_LENGTH;
}
// move to the next sector
err = dbi.next_sector();
if (err < 0) {
return err;
}
// starting over at offset 0 in the new sector
sector_offset = 0;
}
// TODO: we probably ran out of space, add another cluster to the dir and start over
return ERR_NOT_IMPLEMENTED;
}
// given a dir entry location, open the corresponding sector and pass back an open pointer
// into the block cache.
// this code encapsulates the logic that takes into account that cluster 0 is magic in
// fat 12 and fat 16 for the root dir.
static bcache_block_ref open_dirent_block(fat_fs *fat, const dir_entry_location &loc) {
LTRACEF("fat %p, loc %u:%u\n", fat, loc.starting_dir_cluster, loc.dir_offset);
// find the dir entry and open the block
uint32_t sector;
if (loc.starting_dir_cluster == 0) {
DEBUG_ASSERT(fat->info().fat_bits == 12 || fat->info().fat_bits == 16);
// special case on fat12/16 to represent the root dir.
// load 0 into cluster and use sector_offset as relative to the
// start of the volume.
sector = fat->info().root_start_sector;
} else {
sector = fat_sector_for_cluster(fat, loc.starting_dir_cluster);
}
sector += loc.dir_offset / fat->info().bytes_per_sector;
bcache_block_ref bref(fat->bcache());
bref.get_block(sector);
return bref;
}
// update the starting cluster and/or size field in a directory entry
status_t fat_dir_update_entry(fat_fs *fat, const dir_entry_location &loc, uint32_t starting_cluster, uint32_t size) {
LTRACEF("fat %p, loc %u:%u, cluster %u, size %u\n", fat, loc.starting_dir_cluster, loc.dir_offset, starting_cluster, size);
bcache_block_ref bref = open_dirent_block(fat, loc);
DEBUG_ASSERT(bref.is_valid());
uint8_t *ent = (uint8_t *)bref.ptr();
ent += loc.dir_offset % fat->info().bytes_per_sector;
fat_write32(ent, 28, size); // file size
fat_write16(ent, 20, starting_cluster >> 16); // fat cluster high
fat_write16(ent, 26, starting_cluster); // fat cluster low
bref.mark_dirty();
return NO_ERROR;
}
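As a worked example of the offset math used by open_dirent_block() and fat_dir_update_entry() above (the numbers are assumed, not taken from a real volume): with 512-byte sectors and 32-byte directory entries, dir_offset 1056 selects directory entry index 33, which lives two sectors past the first sector of the directory, 32 bytes in.

// illustrative arithmetic only: 512 byte sectors, 32 byte (DIR_ENTRY_LENGTH) entries
const uint32_t bytes_per_sector = 512;
const uint32_t dir_offset = 1056;                               // entry index 33 (1056 / 32)
const uint32_t sector_in_dir = dir_offset / bytes_per_sector;   // 2 sectors past the start
const uint32_t byte_in_sector = dir_offset % bytes_per_sector;  // 32 bytes into that sector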
status_t fat_dir::opendir_priv(const dir_entry &entry, const dir_entry_location &loc, fat_dir_cookie **out_cookie) {
// fill in our file info based on the entry
start_cluster_ = entry.start_cluster;
@@ -376,7 +655,7 @@ status_t fat_dir::opendir(fscookie *cookie, const char *name, dircookie **dcooki
loc.starting_dir_cluster = 1;
loc.dir_offset = 0;
} else {
status_t err = fat_walk(fat, name, &entry, &loc);
status_t err = fat_dir_walk(fat, name, &entry, &loc);
if (err != NO_ERROR) {
return err;
}

View File

@@ -21,7 +21,7 @@ struct fat_dir_cookie;
// at any point in time,
class fat_dir : public fat_file {
public:
explicit fat_dir(fat_fs *f) : fat_file(f) {}
explicit fat_dir(fat_fs *f);
virtual ~fat_dir();
static status_t opendir(fscookie *cookie, const char *name, dircookie **dcookie);

View File

@@ -7,18 +7,24 @@
* https://opensource.org/licenses/MIT
*/
#include <lk/err.h>
#include <lk/trace.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <lib/bcache/bcache_block_ref.h>
#include "fat_fs.h"
#include "fat_priv.h"
#define LOCAL_TRACE FAT_GLOBAL_TRACE(0)
#define LOCAL_TRACE FAT_GLOBAL_TRACE(1)
uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
DEBUG_ASSERT(fat->lock.is_held());
// given a cluster number, compute the sector and the offset within the sector where
// the fat entry exists.
static void compute_fat_entry_address(fat_fs *fat, const uint32_t cluster, uint32_t *sector, uint32_t *offset_within_sector) {
DEBUG_ASSERT(cluster < fat->info().total_clusters);
// TODO: take into account active fat
// offset in bytes into the FAT for this entry
uint32_t fat_offset;
@@ -29,15 +35,25 @@ uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
} else {
fat_offset = cluster + (cluster / 2);
}
LTRACEF("cluster %#x, fat_offset %u\n", cluster, fat_offset);
LTRACEF_LEVEL(2, "cluster %#x, fat_offset %u\n", cluster, fat_offset);
const uint32_t fat_sector = fat_offset / fat->info().bytes_per_sector;
uint32_t bnum = fat->info().reserved_sectors + fat_sector;
const uint32_t fat_offset_in_sector = fat_offset % fat->info().bytes_per_sector;
*sector = fat->info().reserved_sectors + fat_sector;
*offset_within_sector = fat_offset % fat->info().bytes_per_sector;
}
// return the next cluster # in a chain, given a starting cluster
uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
DEBUG_ASSERT(fat->lock.is_held());
// compute the starting address
uint32_t sector;
uint32_t fat_offset_in_sector;
compute_fat_entry_address(fat, cluster, &sector, &fat_offset_in_sector);
// grab a pointer to the sector holding the fat entry
void *cache_ptr;
int err = bcache_get_block(fat->bcache(), &cache_ptr, bnum);
bcache_block_ref bref(fat->bcache());
int err = bref.get_block(sector);
if (err < 0) {
printf("bcache_get_block returned: %i\n", err);
return EOF_CLUSTER;
@@ -45,7 +61,7 @@ uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
uint32_t next_cluster;
if (fat->info().fat_bits == 32) {
const auto *table = (const uint32_t *)cache_ptr;
const auto *table = (const uint32_t *)bref.ptr();
const auto index = fat_offset_in_sector / 4;
next_cluster = table[index];
LE32SWAP(next_cluster);
@@ -53,7 +69,7 @@ uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
// mask out the top nibble
next_cluster &= 0x0fffffff;
} else if (fat->info().fat_bits == 16) {
const auto *table = (const uint16_t *)cache_ptr;
const auto *table = (const uint16_t *)bref.ptr();
const auto index = fat_offset_in_sector / 2;
next_cluster = table[index];
LE16SWAP(next_cluster);
@@ -68,24 +84,22 @@ uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
if (fat_offset_in_sector != (fat->info().bytes_per_sector - 1)) {
// normal, non sector straddling logic
next_cluster = fat_read16(cache_ptr, fat_offset_in_sector);
next_cluster = fat_read16(bref.ptr(), fat_offset_in_sector);
} else {
// need to straddle a fat sector
// read the first byte of the entry
next_cluster = ((const uint8_t *)cache_ptr)[fat_offset_in_sector];
next_cluster = ((const uint8_t *)bref.ptr())[fat_offset_in_sector];
// close the block cache and open the next sector
bcache_put_block(fat->bcache(), bnum);
bnum++;
err = bcache_get_block(fat->bcache(), &cache_ptr, bnum);
err = bref.get_block(++sector);
if (err < 0) {
printf("bcache_get_block returned: %i\n", err);
return EOF_CLUSTER;
}
// read the second byte
next_cluster |= ((const uint8_t *)cache_ptr)[0] << 8;
next_cluster |= ((const uint8_t *)bref.ptr())[0] << 8;
}
// odd cluster, shift over to get our value
@@ -101,40 +115,193 @@ uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster) {
}
}
// return the sector to the block cache
bcache_put_block(fat->bcache(), bnum);
LTRACEF("returning cluster %#x\n", next_cluster);
return next_cluster;
}
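For the FAT12 straddling branch above, a quick check of the offset math (assuming 512-byte FAT sectors): cluster 341 is an entry whose two bytes span a sector boundary, which is exactly the case the second get_block() call handles.

// illustrative FAT12 arithmetic, assuming 512 byte sectors
const uint32_t cluster = 341;
const uint32_t fat_offset = cluster + (cluster / 2);     // 511
const uint32_t offset_in_sector = fat_offset % 512;      // 511 == bytes_per_sector - 1
// the low byte of this 12 bit entry is the last byte of one FAT sector and the
// high bits sit in the first byte of the next, hence the two separate reads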
// given a starting fat cluster, walk the fat chain for offset bytes, returning a new cluster or end of file
uint32_t file_offset_to_cluster(fat_fs *fat, uint32_t start_cluster, off_t offset) {
DEBUG_ASSERT(fat->lock.is_held());
uint32_t fat_find_last_cluster_in_chain(fat_fs *fat, uint32_t starting_cluster) {
LTRACEF("fat %p, starting cluster %u\n", fat, starting_cluster);
// negative offsets do not make sense
DEBUG_ASSERT(offset >= 0);
if (offset < 0) {
if (starting_cluster == 0) {
return 0;
}
uint32_t last_cluster = starting_cluster;
for (;;) {
uint32_t next = fat_next_cluster_in_chain(fat, last_cluster);
if (next == EOF_CLUSTER) {
return last_cluster;
}
last_cluster = next;
}
}
// write a value into a fat entry
static status_t fat_mark_entry(fat_fs *fat, uint32_t cluster, uint32_t val) {
LTRACEF("fat %p, cluster %u, val %#x\n", fat, cluster, val);
// compute the starting address
uint32_t sector;
uint32_t fat_offset_in_sector;
compute_fat_entry_address(fat, cluster, &sector, &fat_offset_in_sector);
// grab a pointer to the sector holding the fat entry
bcache_block_ref bref(fat->bcache());
int err = bref.get_block(sector);
if (err < 0) {
printf("bcache_get_block returned: %i\n", err);
return EOF_CLUSTER;
}
// starting at the start cluster, walk forward N clusters, based on how far
// the offset is units of cluster bytes
uint32_t found_cluster = start_cluster;
size_t clusters_to_walk = (size_t)offset / fat->info().bytes_per_cluster;
while (clusters_to_walk > 0) {
// walk foward these many clusters, returning the FAT entry at that spot
found_cluster = fat_next_cluster_in_chain(fat, found_cluster);
if (is_eof_cluster(found_cluster)) {
break;
}
clusters_to_walk--;
if (fat->info().fat_bits == 32) {
auto *table = (uint32_t *)bref.ptr();
const auto index = fat_offset_in_sector / 4;
table[index] = LE32(val);
} else if (fat->info().fat_bits == 16) {
auto *table = (uint16_t *)bref.ptr();
const auto index = fat_offset_in_sector / 2;
table[index] = LE16(val);
} else { // fat12
PANIC_UNIMPLEMENTED;
}
return found_cluster;
bref.mark_dirty();
return NO_ERROR;
}
// allocate a cluster chain
// start_cluster is an existing cluster the new chain should link to (or 0 if it's the first in the chain)
// count is the number of clusters to allocate
// first and last cluster are the first and last clusters allocated in the new part of the chain (may be the same)
status_t fat_allocate_cluster_chain(fat_fs *fat, uint32_t start_cluster, uint32_t count,
uint32_t *first_cluster, uint32_t *last_cluster,
bool zero_new_blocks) {
LTRACEF("fat %p, starting %u, count %u, zero %u\n", fat, start_cluster, count, zero_new_blocks);
DEBUG_ASSERT(fat->lock.is_held());
*first_cluster = *last_cluster = 0;
uint32_t prev_cluster = start_cluster;
// TODO: start search at start_cluster instead of the beginning
uint32_t search_cluster = 2; // first 2 clusters are reserved
// compute the starting address
uint32_t sector;
uint32_t fat_offset_in_sector;
compute_fat_entry_address(fat, search_cluster, &sector, &fat_offset_in_sector);
// grab a pointer to the sector holding the fat entry
bcache_block_ref bref(fat->bcache());
int err = bref.get_block(sector);
if (err < 0) {
printf("bcache_get_block returned: %i\n", err);
return EOF_CLUSTER;
}
// start walking forward until we have found up to count clusters or we run out of clusters
const auto total_clusters = fat->info().total_clusters;
while (count > 0) {
uint32_t entry;
if (fat->info().fat_bits == 32) {
const auto *table = (const uint32_t *)bref.ptr();
const auto index = fat_offset_in_sector / 4;
entry = table[index];
LE32SWAP(entry);
// mask out the top nibble
entry &= 0x0fffffff;
} else if (fat->info().fat_bits == 16) {
const auto *table = (const uint16_t *)bref.ptr();
const auto index = fat_offset_in_sector / 2;
entry = table[index];
} else { // fat12
PANIC_UNIMPLEMENTED;
}
LTRACEF_LEVEL(2, "search_cluster %u, sector %u, offset %u: entry %#x\n", search_cluster, sector, fat_offset_in_sector, entry);
if (entry == 0) {
// it's a free entry, allocate it and move on
LTRACEF("found free cluster %u, sector %u, offset %u\n", search_cluster, sector, fat_offset_in_sector);
// zero the cluster first
if (zero_new_blocks) {
fat_zero_cluster(fat, search_cluster);
}
// add it to the chain
if (prev_cluster > 0) {
// link the last cluster we had found before to this one.
// NOTE: may be start_cluster if this is the first iteration
fat_mark_entry(fat, prev_cluster, search_cluster);
}
if (*first_cluster == 0) {
// this is the first one in the chain
*first_cluster = search_cluster;
}
*last_cluster = search_cluster;
prev_cluster = search_cluster;
count--;
if (count == 0) {
// we're at the end of this chain, mark it as EOF
fat_mark_entry(fat, search_cluster, EOF_CLUSTER);
// terminate early here, since there's no point advancing
// to the next sector
break;
}
}
// helper to move to the next sector, dropping the old block cache reference and
// loading the next one.
auto inc_sector = [&bref, &sector]() -> status_t {
status_t localerr = bref.get_block(++sector);
if (localerr < 0) {
printf("bcache_get_block returned: %i\n", localerr);
return EOF_CLUSTER;
}
return NO_ERROR;
};
// next entry
search_cluster++;
if (search_cluster >= total_clusters) {
// no more clusters, abort
break;
}
if (fat->info().fat_bits == 32) {
fat_offset_in_sector += 4;
if (fat_offset_in_sector == fat->info().bytes_per_sector) {
fat_offset_in_sector = 0;
if ((err = inc_sector()) != NO_ERROR) {
return err;
}
}
} else if (fat->info().fat_bits == 16) {
fat_offset_in_sector += 2;
if (fat_offset_in_sector == fat->info().bytes_per_sector) {
fat_offset_in_sector = 0;
if ((err = inc_sector()) != NO_ERROR) {
return err;
}
}
} else { // fat12
PANIC_UNIMPLEMENTED;
}
}
if (count == 0) {
return NO_ERROR;
} else {
return ERR_NO_RESOURCES;
}
}
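A short sketch of how the allocator above is intended to be called, mirroring its use in the truncate path later in this change; the wrapper function and the two-cluster count are illustrative, the fat_fs.h/fat_priv.h headers from this diff are assumed, and the fs lock must already be held as the DEBUG_ASSERT requires.

#include "fat_fs.h"
#include "fat_priv.h"

// grow an existing cluster chain by two clusters, zeroing the new ones
static status_t grow_chain_by_two(fat_fs *fat, uint32_t start_cluster) {
    // find the current tail of the chain (0 if there are no clusters yet)
    const uint32_t tail = fat_find_last_cluster_in_chain(fat, start_cluster);

    uint32_t first_new, last_new;
    status_t err = fat_allocate_cluster_chain(fat, tail, 2, &first_new, &last_new,
                                              true /* zero_new_blocks */);
    if (err != NO_ERROR) {
        return err;   // ERR_NO_RESOURCES if the FAT has no free entries left
    }
    (void)last_new;   // not needed in this sketch

    // if the chain was previously empty, first_new is the file's new start
    // cluster and the caller should record it in the directory entry
    return NO_ERROR;
}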
// return the disk sector that corresponds to a cluster number, with
// appropriate offsets applied
uint32_t fat_sector_for_cluster(fat_fs *fat, uint32_t cluster) {
DEBUG_ASSERT(fat->lock.is_held());
@@ -152,13 +319,45 @@ uint32_t fat_sector_for_cluster(fat_fs *fat, uint32_t cluster) {
return sector;
}
// read a cluster directly into a buffer, using the bcache
ssize_t fat_read_cluster(fat_fs *fat, void *buf, uint32_t cluster) {
DEBUG_ASSERT(fat->lock.is_held());
LTRACEF("buf %p, cluster %u\n", buf, cluster);
auto sector = fat_sector_for_cluster(fat, cluster);
return bio_read_block(fat->dev(), buf, sector, fat->info().sectors_per_cluster);
uint8_t *buf8 = (uint8_t *)buf;
for (size_t i = 0; i < fat->info().sectors_per_cluster; i++) {
status_t err = bcache_read_block(fat->bcache(), buf8, sector);
if (err < 0) {
return err;
}
buf8 += fat->info().bytes_per_sector;
sector++;
}
return NO_ERROR;
}
// zero a cluster, using the bcache
ssize_t fat_zero_cluster(fat_fs *fat, uint32_t cluster) {
DEBUG_ASSERT(fat->lock.is_held());
LTRACEF("cluster %u\n", cluster);
auto sector = fat_sector_for_cluster(fat, cluster);
for (size_t i = 0; i < fat->info().sectors_per_cluster; i++) {
status_t err = bcache_zero_block(fat->bcache(), sector);
if (err < 0) {
return err;
}
sector++;
}
return NO_ERROR;
}

View File

@@ -69,6 +69,7 @@ private:
};
enum class fat_attribute : uint8_t {
file = 0x0, // lack of attribute is a file
read_only = 0x01,
hidden = 0x02,
system = 0x04,
@@ -87,6 +88,15 @@ inline uint32_t fat_read32(const void *_buffer, size_t offset) {
(buffer[offset + 3] << 24);
}
inline void fat_write32(void *_buffer, size_t offset, uint32_t val) {
auto *buffer = (uint8_t *)_buffer;
buffer[offset] = val;
buffer[offset + 1] = val >> 8;
buffer[offset + 2] = val >> 16;
buffer[offset + 3] = val >> 24;
}
inline uint16_t fat_read16(const void *_buffer, size_t offset) {
auto *buffer = (const uint8_t *)_buffer;
@@ -94,6 +104,13 @@ inline uint16_t fat_read16(const void *_buffer, size_t offset) {
(buffer[offset + 1] << 8);
}
inline void fat_write16(void *_buffer, size_t offset, uint16_t val) {
auto *buffer = (uint8_t *)_buffer;
buffer[offset] = val;
buffer[offset + 1] = val >> 8;
}
// In fat32, clusters between 0x0fff.fff8 and 0x0fff.ffff are interpreted as
// end of file.
const uint32_t EOF_CLUSTER_BASE = 0x0ffffff8;

View File

@@ -23,11 +23,15 @@ typedef void *fsfilecookie;
/* file allocation table parsing */
uint32_t fat_next_cluster_in_chain(fat_fs *fat, uint32_t cluster);
uint32_t file_offset_to_cluster(fat_fs *fat, uint32_t start_cluster, off_t offset);
uint32_t fat_find_last_cluster_in_chain(fat_fs *fat, uint32_t starting_cluster);
status_t fat_allocate_cluster_chain(fat_fs *fat, uint32_t start_cluster, uint32_t count,
uint32_t *first_cluster, uint32_t *last_cluster,
bool zero_new_blocks);
/* general io routines */
uint32_t fat_sector_for_cluster(fat_fs *fat, uint32_t cluster);
ssize_t fat_read_cluster(fat_fs *fat, void *buf, uint32_t cluster);
ssize_t fat_zero_cluster(fat_fs *fat, uint32_t cluster);
// general directory apis outside of an object
struct dir_entry {
@@ -47,4 +51,11 @@ inline bool operator==(const dir_entry_location &a, const dir_entry_location &b)
return (a.starting_dir_cluster == b.starting_dir_cluster && a.dir_offset == b.dir_offset);
}
status_t fat_walk(fat_fs *fat, const char *path, dir_entry *out_entry, dir_entry_location *loc);
// walk a path, returning the entry and the location where it was found
status_t fat_dir_walk(fat_fs *fat, const char *path, dir_entry *out_entry, dir_entry_location *loc);
// walk a path, allocating a new entry with the path name.
// returns the dir entry location
status_t fat_dir_allocate(fat_fs *fat, const char *path, fat_attribute attr, uint32_t starting_cluster, uint32_t size, dir_entry_location *loc);
status_t fat_dir_update_entry(fat_fs *fat, const dir_entry_location &loc, uint32_t starting_cluster, uint32_t size);

View File

@@ -22,7 +22,7 @@
#include "file_iterator.h"
#define LOCAL_TRACE FAT_GLOBAL_TRACE(0)
#define LOCAL_TRACE FAT_GLOBAL_TRACE(1)
fat_file::fat_file(fat_fs *f) : fs_(f) {}
fat_file::~fat_file() = default;
@@ -78,6 +78,7 @@ status_t fat_file::open_file_priv(const dir_entry &entry, const dir_entry_locati
}
}
// static
status_t fat_file::open_file(fscookie *cookie, const char *path, filecookie **fcookie) {
fat_fs *fs = (fat_fs *)cookie;
@@ -88,7 +89,7 @@ status_t fat_file::open_file(fscookie *cookie, const char *path, filecookie **fc
// look for the file in the fs
dir_entry entry;
dir_entry_location loc;
status_t err = fat_walk(fs, path, &entry, &loc);
status_t err = fat_dir_walk(fs, path, &entry, &loc);
if (err != NO_ERROR) {
return err;
}
@@ -204,6 +205,7 @@ ssize_t fat_file::read_file_priv(void *_buf, const off_t offset, size_t len) {
return amount_read;
}
// static
ssize_t fat_file::read_file(filecookie *fcookie, void *_buf, const off_t offset, size_t len) {
fat_file *file = (fat_file *)fcookie;
@@ -213,11 +215,14 @@ ssize_t fat_file::read_file(filecookie *fcookie, void *_buf, const off_t offset,
status_t fat_file::stat_file_priv(struct file_stat *stat) {
AutoLock guard(fs_->lock);
LTRACEF("file %p state %p\n", this, stat);
stat->size = length_;
stat->is_dir = is_dir();
return NO_ERROR;
}
// static
status_t fat_file::stat_file(filecookie *fcookie, struct file_stat *stat) {
fat_file *file = (fat_file *)fcookie;
@@ -234,9 +239,12 @@ status_t fat_file::close_file_priv(bool *last_ref) {
return NO_ERROR;
}
// static
status_t fat_file::close_file(filecookie *fcookie) {
fat_file *file = (fat_file *)fcookie;
LTRACEF("file %p\n", file);
bool last_ref;
status_t err = file->close_file_priv(&last_ref);
if (err < 0) {
@@ -251,3 +259,125 @@ status_t fat_file::close_file(filecookie *fcookie) {
return NO_ERROR;
}
// static
status_t fat_file::create_file(fscookie *cookie, const char *path, filecookie **fcookie, uint64_t len) {
fat_fs *fs = (fat_fs *)cookie;
LTRACEF("fs %p path '%s' len %" PRIu64 "\n", fs, path, len);
// currently only support zero length files
if (len != 0) {
return ERR_NOT_IMPLEMENTED;
}
{
AutoLock guard(fs->lock);
// tell the dir code to find us a spot
dir_entry_location loc;
status_t err = fat_dir_allocate(fs, path, fat_attribute::file, 0, 0, &loc);
if (err < 0) {
return err;
}
// we have found and allocated a spot
fat_file *file = new fat_file(fs);
file->dir_loc_ = loc;
file->inc_ref();
*fcookie = (filecookie *)file;
}
return NO_ERROR;
}
status_t fat_file::truncate_file_priv(uint64_t _len) {
LTRACEF("file %p, len %" PRIu64" \n", this, _len);
if (_len == length_) {
return NO_ERROR;
}
// test some boundary conditions
if (_len >= 2UL*1024*1024*1024) {
// 2GB limit on the fs
return ERR_TOO_BIG;
}
AutoLock guard(fs_->lock);
// from here on out use this as our length variable
const uint32_t len32 = _len;
// TODO: test for max size of fs
if (len32 == length_) {
return NO_ERROR;
} else {
// are we expanding/shrinking within a cluster that's already allocated?
const uint32_t bpc = fs_->info().bytes_per_cluster;
const uint32_t current_cluster_count = (length_ + bpc - 1) / bpc;
const uint32_t new_cluster_count = (len32 + bpc - 1) / bpc;
LTRACEF("existing len %u, clusters %u: newlen %u, clusters %u\n", length_, current_cluster_count, len32, new_cluster_count);
if (new_cluster_count == current_cluster_count) {
// new length doesn't change the cluster count
// update the dir entry and move on
status_t err = fat_dir_update_entry(fs_, dir_loc_, start_cluster_, len32);
if (err != NO_ERROR) {
return err;
}
// remember our new length
length_ = len32;
} else if (len32 > length_) {
// expanding the file
LTRACEF("expanding the file: start_cluster_ %u\n", start_cluster_);
// TODO: compartmentalize this cluster extension/shrinking so DIR code can reuse it
// TODO: write zeros to any partial blocks we're extending
// walk to the end of the existing cluster chain
const uint32_t existing_chain_end = fat_find_last_cluster_in_chain(fs_, start_cluster_);
uint32_t first_cluster;
uint32_t last_cluster;
status_t err = fat_allocate_cluster_chain(fs_, existing_chain_end, new_cluster_count - current_cluster_count,
&first_cluster, &last_cluster, true);
LTRACEF("fat_allocate_cluster_chain returns %d, first_cluster %u, last_cluster %u\n", err, first_cluster, last_cluster);
if (err != NO_ERROR) {
return err;
}
// update the dir entry, linking the first cluster in our chain if it's the first one
err = fat_dir_update_entry(fs_, dir_loc_, start_cluster_ ? start_cluster_ : first_cluster, len32);
if (err != NO_ERROR) {
return err;
}
// remember our new length
length_ = len32;
// if we just created the first cluster, remember it here
if (start_cluster_ == 0) {
start_cluster_ = first_cluster;
}
} else {
// shrinking the file
PANIC_UNIMPLEMENTED;
return ERR_NOT_IMPLEMENTED;
}
}
bcache_flush(fs_->bcache());
return NO_ERROR;
}
// static
status_t fat_file::truncate_file(filecookie *fcookie, uint64_t len) {
fat_file *file = (fat_file *)fcookie;
return file->truncate_file_priv(len);
}
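To make the cluster arithmetic in truncate_file_priv() concrete (numbers assumed, matching the 4K-cluster case the resize test below exercises): growing a 100-byte file to 4097 bytes goes from one allocated cluster to two, so exactly one new cluster is requested from fat_allocate_cluster_chain().

// illustrative only: 4096 byte clusters, growing a 100 byte file to 4097 bytes
const uint32_t bpc = 4096;
const uint32_t old_len = 100, new_len = 4097;
const uint32_t current_clusters = (old_len + bpc - 1) / bpc;   // 1
const uint32_t new_clusters     = (new_len + bpc - 1) / bpc;   // 2
// so new_clusters - current_clusters == 1 cluster is chained on and zeroed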

View File

@@ -26,6 +26,8 @@ public:
static ssize_t read_file(filecookie *fcookie, void *_buf, const off_t offset, size_t len);
static status_t stat_file(filecookie *fcookie, struct file_stat *stat);
static status_t close_file(filecookie *fcookie);
static status_t create_file(fscookie *cookie, const char *path, filecookie **fcookie, uint64_t len);
static status_t truncate_file(filecookie *fcookie, uint64_t len);
// used by fs node list maintenance
// node in the fs's list of open files and dirs
@@ -37,6 +39,7 @@ private:
ssize_t read_file_priv(void *_buf, const off_t offset, size_t len);
status_t stat_file_priv(struct file_stat *stat);
status_t close_file_priv(bool *last_ref);
status_t truncate_file_priv(uint64_t len);
protected:
// increment the ref and add/remove the file from the fs list
@@ -48,7 +51,7 @@ protected:
fat_fs *fs_ = nullptr; // pointer back to the fs instance we're in
// pointer to our dir entry, acts as our unique key
// pointer to our dir entry, acts as our unique key in the fs list
dir_entry_location dir_loc_ {};
// our start cluster and length

View File

@@ -107,3 +107,11 @@ status_t file_block_iterator::load_bcache_block(bnum_t bnum) {
return err;
}
status_t file_block_iterator::mark_bcache_dirty() {
if (bcache_buf) {
return bcache_mark_block_dirty(fat->bcache(), bcache_bnum);
} else {
return ERR_NO_RESOURCES;
}
}

View File

@@ -29,12 +29,21 @@ public:
DISALLOW_COPY_ASSIGN_AND_MOVE(file_block_iterator);
const uint8_t *get_bcache_ptr(size_t offset) {
const uint8_t *get_bcache_ptr(size_t offset) const {
DEBUG_ASSERT(offset < fat->info().bytes_per_sector);
DEBUG_ASSERT(bcache_buf);
return (const uint8_t *)bcache_buf + offset;
}
uint8_t *get_bcache_ptr(size_t offset) {
DEBUG_ASSERT(offset < fat->info().bytes_per_sector);
DEBUG_ASSERT(bcache_buf);
return (uint8_t *)bcache_buf + offset;
}
// mark the current block as modified
status_t mark_bcache_dirty();
// move N sectors ahead in the file, walking the FAT cluster chain as necessary.
// sectors == 0 will ensure the current block is loaded.
status_t next_sectors(uint32_t sectors);

View File

@@ -16,6 +16,7 @@
#include <malloc.h>
#include <string.h>
#include <endian.h>
#include <stdlib.h>
#include "fat_priv.h"
#include "fat_fs.h"
@@ -46,6 +47,29 @@ __NO_INLINE static void fat_dump(fat_fs *fat) {
fat_fs::fat_fs() = default;
fat_fs::~fat_fs() = default;
void fat_fs::add_to_file_list(fat_file *file) {
DEBUG_ASSERT(lock.is_held());
DEBUG_ASSERT(!list_in_list(&file->node_));
LTRACEF("file %p, location %u:%u\n", file, file->dir_loc().starting_dir_cluster, file->dir_loc().dir_offset);
list_add_head(&file_list_, &file->node_);
}
fat_file *fat_fs::lookup_file(const dir_entry_location &loc) {
DEBUG_ASSERT(lock.is_held());
fat_file *f;
list_for_every_entry(&file_list_, f, fat_file, node_) {
if (loc == f->dir_loc()) {
return f;
}
}
return nullptr;
}
// static fs hooks
status_t fat_fs::mount(bdev_t *dev, fscookie **cookie) {
status_t result = NO_ERROR;
@@ -195,7 +219,12 @@ status_t fat_fs::mount(bdev_t *dev, fscookie **cookie) {
}
info->bytes_per_cluster = info->sectors_per_cluster * info->bytes_per_sector;
fat->bcache_ = bcache_create(fat->dev(), info->bytes_per_sector, 16);
int bcache_size = MIN(16, 64 * info->sectors_per_cluster);
dprintf(INFO, "FAT: creating bcache of %d entries of %u bytes\n", bcache_size, info->bytes_per_sector);
fat->bcache_ = bcache_create(fat->dev(), info->bytes_per_sector, bcache_size);
// we're okay, cancel our cleanup of the fat structure
ac2.cancel();
@@ -210,6 +239,7 @@ status_t fat_fs::mount(bdev_t *dev, fscookie **cookie) {
return result;
}
// static
status_t fat_fs::unmount(fscookie *cookie) {
auto *fat = (fat_fs *)cookie;
@@ -219,6 +249,10 @@ status_t fat_fs::unmount(fscookie *cookie) {
// TODO: handle unmounting when files/dirs are active
DEBUG_ASSERT(list_is_empty(&fat->file_list_));
if (LK_DEBUGLEVEL > INFO) {
bcache_dump(fat->bcache(), "FAT bcache ");
}
bcache_flush(fat->bcache());
bcache_destroy(fat->bcache());
}
@@ -227,28 +261,6 @@ status_t fat_fs::unmount(fscookie *cookie) {
return NO_ERROR;
}
void fat_fs::add_to_file_list(fat_file *file) {
DEBUG_ASSERT(lock.is_held());
DEBUG_ASSERT(!list_in_list(&file->node_));
LTRACEF("file %p, location %u:%u\n", file, file->dir_loc().starting_dir_cluster, file->dir_loc().dir_offset);
list_add_head(&file_list_, &file->node_);
}
fat_file *fat_fs::lookup_file(const dir_entry_location &loc) {
DEBUG_ASSERT(lock.is_held());
fat_file *f;
list_for_every_entry(&file_list_, f, fat_file, node_) {
if (loc == f->dir_loc()) {
return f;
}
}
return nullptr;
}
static const struct fs_api fat_api = {
.format = nullptr,
.fs_stat = nullptr,
@@ -256,9 +268,9 @@ static const struct fs_api fat_api = {
.mount = fat_fs::mount,
.unmount = fat_fs::unmount,
.open = fat_file::open_file,
.create = nullptr,
.create = fat_file::create_file,
.remove = nullptr,
.truncate = nullptr,
.truncate = fat_file::truncate_file,
.stat = fat_file::stat_file,
.read = fat_file::read_file,
.write = nullptr,

View File

@@ -1,24 +1,26 @@
#!/bin/bash
set -e
set -x
# create a largeish file (crosses at least one sector)
dd if=/dev/zero of=largefile bs=1024 count=512
# create a 12 bit fat, 1K clusters, 2MB
rm blk.bin.fat12
mkfs.fat -C blk.bin.fat12 -S 512 -s 2 -n FAT12 2000
rm -f blk.bin.fat12
mkfs.fat -C blk.bin.fat12 -v -S 512 -s 2 -n FAT12 2000
# create a 16 bit fat, 2K clusters, 16MB
rm blk.bin.fat16
mkfs.fat -C blk.bin.fat16 -S 512 -s 4 -n FAT16 16384
rm -f blk.bin.fat16
mkfs.fat -C blk.bin.fat16 -v -S 512 -s 4 -n FAT16 16384
# create a 32 bit fat, 4K clusters, 4GB
rm blk.bin.fat32
mkfs.fat -C blk.bin.fat32 -S 512 -n FAT32 4194304
rm -f blk.bin.fat32
mkfs.fat -v -C blk.bin.fat32 -S 512 -n FAT32 4194304
#mkfs.fat blk.bin.fat32 -v -S 512 -s 8 -n FAT32
# create a huge 32 bit fat, 32K clusters, 2TB
rm blk.bin.fat32.huge
rm -f blk.bin.fat32.huge
#mkfs.fat -v -C blk.bin.fat32.huge -S 512 -n FAT32 2147483648
for i in blk.bin.*; do

View File

@@ -33,6 +33,29 @@ namespace {
const char *test_device_name = "virtio0";
#define test_path "/fat"
// helper routine that mounts the test device above at the /fat path and then cleans up on
// the way out.
template <typename R>
bool test_mount_wrapper(R routine) {
BEGIN_TEST;
ASSERT_EQ(NO_ERROR, fs_mount(test_path, "fat", test_device_name));
// clean up by unmounting no matter what happens here
auto unmount_cleanup = lk::make_auto_call([]() { fs_unmount(test_path); });
// call through to the inner routine
all_ok = routine();
if (!all_ok) {
END_TEST;
}
// unmount the fs
unmount_cleanup.cancel();
ASSERT_EQ(NO_ERROR, fs_unmount(test_path));
END_TEST;
}
bool test_fat_mount() {
BEGIN_TEST;
@@ -45,53 +68,47 @@ bool test_fat_mount() {
}
bool test_fat_dir_root() {
BEGIN_TEST;
return test_mount_wrapper([]() {
BEGIN_TEST;
ASSERT_EQ(NO_ERROR, fs_mount(test_path, "fat", test_device_name));
// open and then close the root dir
dirhandle *handle;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path, &handle));
ASSERT_NONNULL(handle);
ASSERT_EQ(NO_ERROR, fs_close_dir(handle));
// clean up by unmounting no matter what happens here
auto unmount_cleanup = lk::make_auto_call([]() { fs_unmount(test_path); });
// open it again
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path, &handle));
ASSERT_NONNULL(handle);
// open and then close the root dir
dirhandle *handle;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path, &handle));
ASSERT_NONNULL(handle);
ASSERT_EQ(NO_ERROR, fs_close_dir(handle));
// close the dir handle if we abort from here on out
auto closedir_cleanup = lk::make_auto_call([&]() { fs_close_dir(handle); });
// open it again
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path, &handle));
ASSERT_NONNULL(handle);
// close the dir handle if we abort from here on out
auto closedir_cleanup = lk::make_auto_call([&]() { fs_close_dir(handle); });
// read an entry
dirent ent;
ASSERT_EQ(NO_ERROR, fs_read_dir(handle, &ent));
LTRACEF("read entry '%s'\n", ent.name);
// read all of the entries until we hit an EOD
int count = 1;
for (;;) {
auto err = fs_read_dir(handle, &ent);
bool valid = (err == NO_ERROR || err == ERR_NOT_FOUND);
ASSERT_TRUE(valid);
count++;
if (err == ERR_NOT_FOUND) {
break;
}
// read an entry
dirent ent;
ASSERT_EQ(NO_ERROR, fs_read_dir(handle, &ent));
LTRACEF("read entry '%s'\n", ent.name);
}
// make sure we saw at least 3 entries
ASSERT_LT(2, count);
closedir_cleanup.cancel();
ASSERT_EQ(NO_ERROR, fs_close_dir(handle));
// read all of the entries until we hit an EOD
int count = 1;
for (;;) {
auto err = fs_read_dir(handle, &ent);
bool valid = (err == NO_ERROR || err == ERR_NOT_FOUND);
ASSERT_TRUE(valid);
count++;
if (err == ERR_NOT_FOUND) {
break;
}
LTRACEF("read entry '%s'\n", ent.name);
}
// make sure we saw at least 3 entries
ASSERT_LT(2, count);
unmount_cleanup.cancel();
ASSERT_EQ(NO_ERROR, fs_unmount(test_path));
closedir_cleanup.cancel();
ASSERT_EQ(NO_ERROR, fs_close_dir(handle));
END_TEST;
END_TEST;
});
}
// helper routine for the read file test routine below
@@ -130,80 +147,127 @@ bool test_file_read(const char *path, const unsigned char *test_file_buffer, siz
}
bool test_fat_read_file() {
BEGIN_TEST;
return test_mount_wrapper([]() {
BEGIN_TEST;
ASSERT_EQ(NO_ERROR, fs_mount(test_path, "fat", test_device_name));
// clean up by unmounting no matter what happens here
auto unmount_cleanup = lk::make_auto_call([]() { fs_unmount(test_path); });
// read in a few files and validate their contents
EXPECT_TRUE(test_file_read(test_path "/hello.txt", test_file_hello, test_file_hello_size));
EXPECT_TRUE(test_file_read(test_path "/license", test_file_license, test_file_license_size));
EXPECT_TRUE(test_file_read(test_path "/long_filename_hello.txt", test_file_hello, test_file_hello_size));
EXPECT_TRUE(test_file_read(test_path "/a_very_long_filename_hello_that_uses_at_least_a_few_entries.txt", test_file_hello, test_file_hello_size));
EXPECT_TRUE(test_file_read(test_path "/dir.a/long_filename_hello.txt", test_file_hello, test_file_hello_size));
// read in a few files and validate their contents
EXPECT_TRUE(test_file_read(test_path "/hello.txt", test_file_hello, test_file_hello_size));
EXPECT_TRUE(test_file_read(test_path "/license", test_file_license, test_file_license_size));
EXPECT_TRUE(test_file_read(test_path "/long_filename_hello.txt", test_file_hello, test_file_hello_size));
EXPECT_TRUE(test_file_read(test_path "/a_very_long_filename_hello_that_uses_at_least_a_few_entries.txt", test_file_hello, test_file_hello_size));
EXPECT_TRUE(test_file_read(test_path "/dir.a/long_filename_hello.txt", test_file_hello, test_file_hello_size));
// unmount the fs
unmount_cleanup.cancel();
ASSERT_EQ(NO_ERROR, fs_unmount(test_path));
END_TEST;
END_TEST;
});
}
bool test_fat_multi_open() {
BEGIN_TEST;
return test_mount_wrapper([]() {
BEGIN_TEST;
ASSERT_EQ(NO_ERROR, fs_mount(test_path, "fat", test_device_name));
// clean up by unmounting no matter what happens here
auto unmount_cleanup = lk::make_auto_call([]() { fs_unmount(test_path); });
// open a file three times simultaneously
{
filehandle *handle1 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_file(test_path "/hello.txt", &handle1));
auto closefile_cleanup1 = lk::make_auto_call([&]() { fs_close_file(handle1); });
// open a file three times simultaneously
{
filehandle *handle1 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_file(test_path "/hello.txt", &handle1));
auto closefile_cleanup1 = lk::make_auto_call([&]() { fs_close_file(handle1); });
filehandle *handle2 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_file(test_path "/hello.txt", &handle2));
auto closefile_cleanup2 = lk::make_auto_call([&]() { fs_close_file(handle2); });
filehandle *handle2 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_file(test_path "/hello.txt", &handle2));
auto closefile_cleanup2 = lk::make_auto_call([&]() { fs_close_file(handle2); });
filehandle *handle3 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_file(test_path "/hello.txt", &handle3));
filehandle *handle3 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_file(test_path "/hello.txt", &handle3));
// close the files in reverse order
closefile_cleanup1.cancel();
ASSERT_EQ(NO_ERROR, fs_close_file(handle1));
closefile_cleanup2.cancel();
ASSERT_EQ(NO_ERROR, fs_close_file(handle2));
ASSERT_EQ(NO_ERROR, fs_close_file(handle3));
}
// close the files in reverse order
closefile_cleanup1.cancel();
ASSERT_EQ(NO_ERROR, fs_close_file(handle1));
closefile_cleanup2.cancel();
ASSERT_EQ(NO_ERROR, fs_close_file(handle2));
ASSERT_EQ(NO_ERROR, fs_close_file(handle3));
}
// open a dir three times simultaneously
{
dirhandle *handle1 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path "/dir.a", &handle1));
auto closedir_cleanup1 = lk::make_auto_call([&]() { fs_close_dir(handle1); });
// open a dir three times simultaneously
{
dirhandle *handle1 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path "/dir.a", &handle1));
auto closedir_cleanup1 = lk::make_auto_call([&]() { fs_close_dir(handle1); });
dirhandle *handle2 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path "/dir.a", &handle2));
auto closedir_cleanup2 = lk::make_auto_call([&]() { fs_close_dir(handle2); });
dirhandle *handle2 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path "/dir.a", &handle2));
auto closedir_cleanup2 = lk::make_auto_call([&]() { fs_close_dir(handle2); });
dirhandle *handle3 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path "/dir.a", &handle3));
dirhandle *handle3 = nullptr;
ASSERT_EQ(NO_ERROR, fs_open_dir(test_path "/dir.a", &handle3));
// close the dirs in reverse order
closedir_cleanup1.cancel();
ASSERT_EQ(NO_ERROR, fs_close_dir(handle1));
closedir_cleanup2.cancel();
ASSERT_EQ(NO_ERROR, fs_close_dir(handle2));
ASSERT_EQ(NO_ERROR, fs_close_dir(handle3));
}
// close the dirs in reverse order
closedir_cleanup1.cancel();
ASSERT_EQ(NO_ERROR, fs_close_dir(handle1));
closedir_cleanup2.cancel();
ASSERT_EQ(NO_ERROR, fs_close_dir(handle2));
ASSERT_EQ(NO_ERROR, fs_close_dir(handle3));
}
END_TEST;
});
}
// unmount the fs
unmount_cleanup.cancel();
ASSERT_EQ(NO_ERROR, fs_unmount(test_path));
bool test_fat_create_file() {
return test_mount_wrapper([]() {
BEGIN_TEST;
END_TEST;
filehandle *handle;
// create a few empty files
handle = nullptr;
ASSERT_EQ(NO_ERROR, fs_create_file(test_path "/newfile", &handle, 0));
ASSERT_NONNULL(handle);
ASSERT_EQ(NO_ERROR, fs_close_file(handle));
handle = nullptr;
ASSERT_EQ(NO_ERROR, fs_create_file(test_path "/newfile.txt", &handle, 0));
ASSERT_NONNULL(handle);
ASSERT_EQ(NO_ERROR, fs_close_file(handle));
// create a file in a subdir
handle = nullptr;
ASSERT_EQ(NO_ERROR, fs_create_file(test_path "/dir.a/newfile", &handle, 0));
ASSERT_NONNULL(handle);
ASSERT_EQ(NO_ERROR, fs_close_file(handle));
// create a file that already exists
handle = nullptr;
ASSERT_EQ(ERR_ALREADY_EXISTS, fs_create_file(test_path "/newfile", &handle, 0));
END_TEST;
});
}
bool test_fat_resize_file() {
return test_mount_wrapper([]() {
BEGIN_TEST;
filehandle *handle;
// create an empty file
handle = nullptr;
ASSERT_EQ(NO_ERROR, fs_create_file(test_path "/reszfile", &handle, 0));
ASSERT_NONNULL(handle);
auto closefile_cleanup1 = lk::make_auto_call([&]() { fs_close_file(handle); });
// resize the file
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 0)); // same size
EXPECT_EQ(ERR_TOO_BIG, fs_truncate_file(handle, 2UL*1024*1024*1024)); // too big for FAT
EXPECT_EQ(ERR_TOO_BIG, fs_truncate_file(handle, 8UL*1024*1024*1024)); // >32bit too big for FAT
EXPECT_EQ(ERR_TOO_BIG, fs_truncate_file(handle, -1)); // negative should produce way out of range
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 1));
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 4095)); // assumes cluster size 4k
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 4096));
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 4097));
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 12345));
EXPECT_EQ(NO_ERROR, fs_truncate_file(handle, 1002345));
END_TEST;
});
}
BEGIN_TEST_CASE(fat)
@@ -211,6 +275,8 @@ BEGIN_TEST_CASE(fat)
RUN_TEST(test_fat_dir_root)
RUN_TEST(test_fat_read_file)
RUN_TEST(test_fat_multi_open)
RUN_TEST(test_fat_create_file)
RUN_TEST(test_fat_resize_file)
END_TEST_CASE(fat)
} // namespace

View File

@@ -165,7 +165,7 @@ static int cmd_mkfile(int argc, const console_cmd_args *argv) {
prepend_cwd(path, FS_MAX_PATH_LEN, argv[1].str);
filehandle *handle;
status_t status = fs_create_file(path, &handle, (argc >= 2) ? argv[2].u : 0);
status_t status = fs_create_file(path, &handle, (argc >= 3) ? argv[2].u : 0);
if (status < 0) {
printf("error %d making file '%s'\n", status, path);
goto err;

View File

@@ -5,6 +5,8 @@
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lib/partition.h>
#include <lk/debug.h>
#include <stdio.h>
#include <string.h>
@@ -12,7 +14,6 @@
#include <stdlib.h>
#include <arch.h>
#include <lib/bio.h>
#include <lib/partition.h>
struct chs {
uint8_t c;
@@ -93,7 +94,7 @@ int partition_publish(const char *device, off_t offset) {
// publish it
char subdevice[128];
sprintf(subdevice, "%sp%d", device, i);
snprintf(subdevice, sizeof(subdevice), "%sp%d", device, i);
err = bio_publish_subdevice(device, subdevice, part[i].lba_start, part[i].lba_length);
if (err < 0) {
@@ -107,6 +108,10 @@ int partition_publish(const char *device, off_t offset) {
bio_close(dev);
if (err >= 0) {
dprintf(INFO, "partition_publish: %u partition%s found\n", count, (count == 1) ? "" : "s");
}
err:
return (err < 0) ? err : count;
}

View File

@@ -26,7 +26,7 @@ function HELP {
DO_NET=0
DO_NET_TAP=0
DO_DISK=0
DO_DISK_IMAGE=""
DISK_IMAGE=""
DO_64BIT=0
DO_VIRT=0
DO_CORTEX_M3=0
@@ -92,6 +92,8 @@ ARGS=" -cpu $CPU -m $MEMSIZE -smp $SMP -machine $MACHINE -kernel build-${PROJECT
if (( $DO_DISK )); then
ARGS+=" -drive if=none,file=${DISK_IMAGE},id=blk,format=raw"
# valid options are writeback, none, writethrough, directsync, unsafe
#ARGS+=",cache=none"
ARGS+=" -device virtio-blk-device,drive=blk"
fi