Move primary cache code to libminixfs.
Add primary cache management to libminixfs, as mfs and ext2 currently do
separately; remove the cache code from mfs and ext2 and make them use the
libminixfs interface. This makes all fields of the buf struct private to
libminixfs; FS clients aren't supposed to access them at all. Only the opaque
'void *data' field (the FS block contents, previously called bp) is to be
accessed by the FS client. The main purpose is to implement the interface to
the secondary VM cache just once, get rid of some code duplication, and add a
little abstraction to reduce the code inertia of the whole caching business.
Some minor sanity checks and prohibitions that mfs performed in this code were
removed from the generic primary cache code as a result:
- checking that no inodes are in use when allocating/resizing the cache
- checking that read-only filesystems aren't written to
- checking that the superblock isn't written to on mounted filesystems
The libminixfs code relies on fs_blockstats() in the client filesystem to
return some FS usage information.
parent 280d8c668e
commit bd3cde4571
53 changed files with 927 additions and 1630 deletions
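For orientation before the diff: after this commit a client filesystem sees
only the opaque data pointer plus the lmfs_* calls declared in
<minix/libminixfs.h>, and must export fs_blockstats() and fs_sync() for the
library to call back into. The following is a minimal sketch of the client
side, not part of the diff; the superblock counters and the zero_block()
helper are hypothetical, and only the lmfs_*/fs_* names come from this commit.

#include <string.h>

#include <minix/libminixfs.h>

/* Hypothetical counters the FS keeps up to date from its superblock. */
static u32_t sb_btotal, sb_bfree;

/* Callback required by libminixfs: report FS usage for pool sizing. */
void fs_blockstats(u32_t *blocks, u32_t *free, u32_t *used)
{
  *blocks = sb_btotal;
  *free = sb_bfree;
  *used = sb_btotal - sb_bfree;
}

/* Block contents are touched only through bp->data; every other buf
 * field is private to libminixfs after this commit.
 */
static void zero_block(dev_t dev, block_t b)
{
  struct buf *bp;

  bp = lmfs_get_block(dev, b, NO_READ);  /* contents will be overwritten */
  memset(bp->data, 0, lmfs_fs_block_size());
  lmfs_markdirty(bp);
  lmfs_put_block(bp, FULL_DATA_BLOCK);
}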
@@ -7,10 +7,65 @@
#include <minix/sef.h>
#include <minix/vfsif.h>

struct buf {
  /* Data portion of the buffer. */
  void *data;

  /* Header portion of the buffer - internal to libminixfs. */
  struct buf *lmfs_next;     /* used to link all free bufs in a chain */
  struct buf *lmfs_prev;     /* used to link all free bufs the other way */
  struct buf *lmfs_hash;     /* used to link bufs on hash chains */
  block_t lmfs_blocknr;      /* block number of its (minor) device */
  dev_t lmfs_dev;            /* major | minor device where block resides */
  char lmfs_dirt;            /* BP_CLEAN or BP_DIRTY */
  char lmfs_count;           /* number of users of this buffer */
  unsigned int lmfs_bytes;   /* Number of bytes allocated in bp */
};

int fs_lookup_credentials(vfs_ucred_t *credentials,
  uid_t *caller_uid, gid_t *caller_gid, cp_grant_id_t grant2, size_t cred_size);
u32_t fs_bufs_heuristic(int minbufs, u32_t btotal, u32_t bfree,
  int blocksize, dev_t majordev);

void lmfs_markdirty(struct buf *bp);
void lmfs_markclean(struct buf *bp);
int lmfs_isclean(struct buf *bp);
dev_t lmfs_dev(struct buf *bp);
int lmfs_bytes(struct buf *bp);
int lmfs_bufs_in_use(void);
int lmfs_nr_bufs(void);
void lmfs_flushall(void);
int lmfs_fs_block_size(void);
void lmfs_may_use_vmcache(int);
void lmfs_set_blocksize(int blocksize, int major);
void lmfs_reset_rdwt_err(void);
int lmfs_rdwt_err(void);
void lmfs_buf_pool(int new_nr_bufs);
struct buf *lmfs_get_block(dev_t dev, block_t block,int only_search);
void lmfs_invalidate(dev_t device);
void lmfs_put_block(struct buf *bp, int block_type);
void lmfs_rw_scattered(dev_t, struct buf **, int, int);

/* calls that libminixfs does into fs */
void fs_blockstats(u32_t *blocks, u32_t *free, u32_t *used);
int fs_sync(void);

/* get_block arguments */
#define NORMAL             0    /* forces get_block to do disk read */
#define NO_READ            1    /* prevents get_block from doing disk read */
#define PREFETCH           2    /* tells get_block not to read or mark dev */

/* When a block is released, the type of usage is passed to put_block(). */
#define ONE_SHOT        0200    /* set if block not likely to be needed soon */

#define INODE_BLOCK        0    /* inode block */
#define DIRECTORY_BLOCK    1    /* directory block */
#define INDIRECT_BLOCK     2    /* pointer block */
#define MAP_BLOCK          3    /* bit map */
#define FULL_DATA_BLOCK    5    /* data, fully used */
#define PARTIAL_DATA_BLOCK 6    /* data, partly used*/

#define END_OF_FILE   (-104)    /* eof detected */

#endif /* _MINIX_FSLIB_H */

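As the later hunks in ext2's main.c and mount.c show, a client is expected to
create a small pool at startup and let lmfs_set_blocksize() resize it at mount
time. A rough sketch under those assumptions; the two wrapper function names
and the dev/block_size arguments are illustrative, not from this commit:

#include <minix/libminixfs.h>

/* At service startup (cf. sef_cb_init_fresh below). */
static void init_cache(void)
{
  lmfs_may_use_vmcache(1);  /* allow use of VM's secondary cache */
  lmfs_buf_pool(10);        /* small pool until mount reveals the block size */
}

/* At mount time (cf. fs_readsuper below). */
static void mount_cache(int block_size, dev_t dev)
{
  /* Resizes the pool (via the fs_blockstats() callback and
   * fs_bufs_heuristic()) and decides whether the VM cache is usable. */
  lmfs_set_blocksize(block_size, major(dev));
}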
@@ -1,12 +1,47 @@

#define _SYSTEM

#include <minix/libminixfs.h>
#include <minix/dmap.h>
#include <minix/u64.h>
#include <minix/sysutil.h>
#include <sys/param.h>
#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdlib.h>

#include <sys/param.h>
#include <sys/param.h>

#include <minix/dmap.h>
#include <minix/libminixfs.h>
#include <minix/syslib.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <minix/bdev.h>

#define BP_CLEAN        0    /* on-disk block and memory copies identical */
#define BP_DIRTY        1    /* on-disk block and memory copies differ */

#define BUFHASH(b) ((b) % nr_bufs)
#define MARKCLEAN  lmfs_markclean

#define MINBUFS         6    /* minimal no of bufs for sanity check */

static struct buf *front;       /* points to least recently used free block */
static struct buf *rear;        /* points to most recently used free block */
static unsigned int bufs_in_use;/* # bufs currently in use (not on free list)*/

static void rm_lru(struct buf *bp);
static void read_block(struct buf *);
static void flushall(dev_t dev);

static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */

static struct buf *buf;
static struct buf **buf_hash;   /* the buffer hash table */
static unsigned int nr_bufs;
static int may_use_vmcache;

static int fs_block_size = 1024;  /* raw i/o block size */

static int rdwt_err;

u32_t fs_bufs_heuristic(int minbufs, u32_t btotal, u32_t bfree,
  int blocksize, dev_t majordev)

@@ -58,3 +93,605 @@ u32_t fs_bufs_heuristic(int minbufs, u32_t btotal, u32_t bfree,
  return bufs;
}

void
lmfs_markdirty(struct buf *bp)
{
  bp->lmfs_dirt = BP_DIRTY;
}

void
lmfs_markclean(struct buf *bp)
{
  bp->lmfs_dirt = BP_CLEAN;
}

int
lmfs_isclean(struct buf *bp)
{
  return bp->lmfs_dirt == BP_CLEAN;
}

dev_t
lmfs_dev(struct buf *bp)
{
  return bp->lmfs_dev;
}

int lmfs_bytes(struct buf *bp)
{
  return bp->lmfs_bytes;
}

/*===========================================================================*
 *                              lmfs_get_block                               *
 *===========================================================================*/
struct buf *lmfs_get_block(
  register dev_t dev,       /* on which device is the block? */
  register block_t block,   /* which block is wanted? */
  int only_search           /* if NO_READ, don't read, else act normal */
)
{
/* Check to see if the requested block is in the block cache. If so, return
 * a pointer to it. If not, evict some other block and fetch it (unless
 * 'only_search' is 1). All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block. If 'only_search' is
 * 1, the block being requested will be overwritten in its entirety, so it is
 * only necessary to see if it is in the cache; if it is not, any free buffer
 * will do. It is not necessary to actually read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  static struct buf *bp, *prev_ptr;
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block). Do_read() can use
   * lmfs_get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped
   */
  if (dev != NO_DEV) {
    b = BUFHASH(block);
    bp = buf_hash[b];
    while (bp != NULL) {
      if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev) {
        /* Block needed has been found. */
        if (bp->lmfs_count == 0) rm_lru(bp);
        bp->lmfs_count++;           /* record that block is in use */
        ASSERT(bp->lmfs_bytes == fs_block_size);
        ASSERT(bp->lmfs_dev == dev);
        ASSERT(bp->lmfs_dev != NO_DEV);
        ASSERT(bp->data);
        return(bp);
      } else {
        /* This block is not the one sought. */
        bp = bp->lmfs_hash;         /* move to next block on hash chain */
      }
    }
  }

  /* Desired block is not on available chain. Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);

  if(bp->lmfs_bytes < fs_block_size) {
    ASSERT(!bp->data);
    ASSERT(bp->lmfs_bytes == 0);
    if(!(bp->data = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
      printf("fs cache: couldn't allocate a new block.\n");
      for(bp = front;
          bp && bp->lmfs_bytes < fs_block_size; bp = bp->lmfs_next)
        ;
      if(!bp) {
        panic("no buffer available");
      }
    } else {
      bp->lmfs_bytes = fs_block_size;
    }
  }

  ASSERT(bp);
  ASSERT(bp->data);
  ASSERT(bp->lmfs_bytes == fs_block_size);
  ASSERT(bp->lmfs_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
    buf_hash[b] = bp->lmfs_hash;
  } else {
    /* The block just taken is not on the front of its hash chain. */
    while (prev_ptr->lmfs_hash != NULL)
      if (prev_ptr->lmfs_hash == bp) {
        prev_ptr->lmfs_hash = bp->lmfs_hash;  /* found it */
        break;
      } else {
        prev_ptr = prev_ptr->lmfs_hash;       /* keep looking */
      }
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->lmfs_dev != NO_DEV) {
    if (bp->lmfs_dirt == BP_DIRTY) flushall(bp->lmfs_dev);

    /* Are we throwing out a block that contained something?
     * Give it to VM for the second-layer cache.
     */
    yieldid = make64(bp->lmfs_dev, bp->lmfs_blocknr);
    assert(bp->lmfs_bytes == fs_block_size);
    bp->lmfs_dev = NO_DEV;
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  MARKCLEAN(bp);              /* NO_DEV blocks may be marked dirty */
  bp->lmfs_dev = dev;         /* fill in device number */
  bp->lmfs_blocknr = block;   /* fill in block number */
  bp->lmfs_count++;           /* record that block is being used */
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;           /* add to hash list */

  if(dev == NO_DEV) {
    if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
      vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
        bp->data, fs_block_size);
    }
    return(bp);   /* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
    /* Block is not found in our cache, but we do want it
     * if it's in the vm cache.
     */
    if(vmcache) {
      /* If we can satisfy the PREFETCH or NORMAL request
       * from the vm cache, work is done.
       */
      if(vm_yield_block_get_block(yieldid, getid,
          bp->data, fs_block_size) == OK) {
        return bp;
      }
    }
  }

  if(only_search == PREFETCH) {
    /* PREFETCH: don't do i/o. */
    bp->lmfs_dev = NO_DEV;
  } else if (only_search == NORMAL) {
    read_block(bp);
  } else if(only_search == NO_READ) {
    /* we want this block, but its contents
     * will be overwritten. VM has to forget
     * about it.
     */
    if(vmcache) {
      vm_forgetblock(getid);
    }
  } else
    panic("unexpected only_search value: %d", only_search);

  assert(bp->data);

  return(bp);     /* return the newly acquired block */
}

/*===========================================================================*
 *                              lmfs_put_block                               *
 *===========================================================================*/
void lmfs_put_block(bp, block_type)
register struct buf *bp;  /* pointer to the buffer to be released */
int block_type;           /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
{
/* Return a block to the list of available blocks. Depending on 'block_type'
 * it may be put on the front or rear of the LRU chain. Blocks that are
 * expected to be needed again shortly (e.g., partially full data blocks)
 * go on the rear; blocks that are unlikely to be needed again shortly
 * (e.g., full data blocks) go on the front. Blocks whose loss can hurt
 * the integrity of the file system (e.g., inode blocks) are written to
 * disk immediately if they are dirty.
 */
  if (bp == NULL) return;   /* it is easier to check here than in caller */

  bp->lmfs_count--;         /* there is one use fewer now */
  if (bp->lmfs_count != 0) return;  /* block is still in use */

  bufs_in_use--;            /* one fewer block buffers in use */

  /* Put this block back on the LRU chain. */
  if (bp->lmfs_dev == DEV_RAM || (block_type & ONE_SHOT)) {
    /* Block probably won't be needed quickly. Put it on front of chain.
     * It will be the next block to be evicted from the cache.
     */
    bp->lmfs_prev = NULL;
    bp->lmfs_next = front;
    if (front == NULL)
      rear = bp;            /* LRU chain was empty */
    else
      front->lmfs_prev = bp;
    front = bp;
  }
  else {
    /* Block probably will be needed quickly. Put it on rear of chain.
     * It will not be evicted from the cache for a long time.
     */
    bp->lmfs_prev = rear;
    bp->lmfs_next = NULL;
    if (rear == NULL)
      front = bp;
    else
      rear->lmfs_next = bp;
    rear = bp;
  }
}

/*===========================================================================*
 *                                read_block                                 *
 *===========================================================================*/
static void read_block(bp)
register struct buf *bp;  /* buffer pointer */
{
/* Read or write a disk block. This is the only routine in which actual disk
 * I/O is invoked. If an error occurs, a message is printed here, but the error
 * is not reported to the caller. If the error occurred while purging a block
 * from the cache, it is not clear what the caller could do about it anyway.
 */
  int r, op_failed;
  u64_t pos;
  dev_t dev;

  op_failed = 0;

  if ( (dev = bp->lmfs_dev) != NO_DEV) {
    pos = mul64u(bp->lmfs_blocknr, fs_block_size);
    r = bdev_read(dev, pos, bp->data, fs_block_size,
      BDEV_NOFLAGS);
    if (r < 0) {
      printf("fs cache: I/O error on device %d/%d, block %u\n",
        major(dev), minor(dev), bp->lmfs_blocknr);
      op_failed = 1;
    } else if (r != (ssize_t) fs_block_size) {
      r = END_OF_FILE;
      op_failed = 1;
    }

    if (op_failed) {
      bp->lmfs_dev = NO_DEV;  /* invalidate block */

      /* Report read errors to interested parties. */
      rdwt_err = r;
    }
  }
}

/*===========================================================================*
 *                              lmfs_invalidate                              *
 *===========================================================================*/
void lmfs_invalidate(
  dev_t device      /* device whose blocks are to be purged */
)
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if (bp->lmfs_dev == device) bp->lmfs_dev = NO_DEV;

  vm_forgetblocks();
}

/*===========================================================================*
 *                                 flushall                                  *
 *===========================================================================*/
static void flushall(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf **dirty;  /* static so it isn't on stack */
  static unsigned int dirtylistsize = 0;
  int ndirty;

  if(dirtylistsize != nr_bufs) {
    if(dirtylistsize > 0) {
      assert(dirty != NULL);
      free(dirty);
    }
    if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
      panic("couldn't allocate dirty buf list");
    dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
    if (bp->lmfs_dirt == BP_DIRTY && bp->lmfs_dev == dev) {
      dirty[ndirty++] = bp;
    }
  }

  lmfs_rw_scattered(dev, dirty, ndirty, WRITING);
}

/*===========================================================================*
 *                            lmfs_rw_scattered                              *
 *===========================================================================*/
void lmfs_rw_scattered(
  dev_t dev,          /* major-minor device number */
  struct buf **bufq,  /* pointer to array of buffers */
  int bufqsize,       /* number of buffers */
  int rw_flag         /* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  int gap;
  register int i;
  register iovec_t *iop;
  static iovec_t *iovec = NULL;
  u64_t pos;
  int j, r;

  STATICINIT(iovec, NR_IOREQS);

  /* (Shell) sort buffers on lmfs_blocknr. */
  gap = 1;
  do
    gap = 3 * gap + 1;
  while (gap <= bufqsize);
  while (gap != 1) {
    gap /= 3;
    for (j = gap; j < bufqsize; j++) {
      for (i = j - gap;
           i >= 0 && bufq[i]->lmfs_blocknr > bufq[i + gap]->lmfs_blocknr;
           i -= gap) {
        bp = bufq[i];
        bufq[i] = bufq[i + gap];
        bufq[i + gap] = bp;
      }
    }
  }

  /* Set up I/O vector and do I/O. The result of bdev I/O is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
    for (j = 0, iop = iovec; j < NR_IOREQS && j < bufqsize; j++, iop++) {
      bp = bufq[j];
      if (bp->lmfs_blocknr != (block_t) bufq[0]->lmfs_blocknr + j) break;
      iop->iov_addr = (vir_bytes) bp->data;
      iop->iov_size = (vir_bytes) fs_block_size;
    }
    pos = mul64u(bufq[0]->lmfs_blocknr, fs_block_size);
    if (rw_flag == READING)
      r = bdev_gather(dev, pos, iovec, j, BDEV_NOFLAGS);
    else
      r = bdev_scatter(dev, pos, iovec, j, BDEV_NOFLAGS);

    /* Harvest the results. The driver may have returned an error, or it
     * may have done less than what we asked for.
     */
    if (r < 0) {
      printf("fs cache: I/O error %d on device %d/%d, block %u\n",
        r, major(dev), minor(dev), bufq[0]->lmfs_blocknr);
    }
    for (i = 0; i < j; i++) {
      bp = bufq[i];
      if (r < (ssize_t) fs_block_size) {
        /* Transfer failed. */
        if (i == 0) {
          bp->lmfs_dev = NO_DEV;  /* Invalidate block */
          vm_forgetblocks();
        }
        break;
      }
      if (rw_flag == READING) {
        bp->lmfs_dev = dev;       /* validate block */
        lmfs_put_block(bp, PARTIAL_DATA_BLOCK);
      } else {
        MARKCLEAN(bp);
      }
      r -= fs_block_size;
    }
    bufq += i;
    bufqsize -= i;
    if (rw_flag == READING) {
      /* Don't bother reading more than the device is willing to
       * give at this time. Don't forget to release those extras.
       */
      while (bufqsize > 0) {
        lmfs_put_block(*bufq++, PARTIAL_DATA_BLOCK);
        bufqsize--;
      }
    }
    if (rw_flag == WRITING && i == 0) {
      /* We're not making progress, this means we might keep
       * looping. Buffers remain dirty if un-written. Buffers are
       * lost if invalidate()d or LRU-removed while dirty. This
       * is better than keeping unwritable blocks around forever..
       */
      break;
    }
  }
}

/*===========================================================================*
 *                                  rm_lru                                   *
 *===========================================================================*/
static void rm_lru(bp)
struct buf *bp;
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  bufs_in_use++;
  next_ptr = bp->lmfs_next;   /* successor on LRU chain */
  prev_ptr = bp->lmfs_prev;   /* predecessor on LRU chain */
  if (prev_ptr != NULL)
    prev_ptr->lmfs_next = next_ptr;
  else
    front = next_ptr;         /* this block was at front of chain */

  if (next_ptr != NULL)
    next_ptr->lmfs_prev = prev_ptr;
  else
    rear = prev_ptr;          /* this block was at rear of chain */
}

/*===========================================================================*
 *                               cache_resize                                *
 *===========================================================================*/
static void cache_resize(unsigned int blocksize, unsigned int bufs)
{
  struct buf *bp;

  assert(blocksize > 0);
  assert(bufs >= MINBUFS);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if(bp->lmfs_count != 0) panic("change blocksize with buffer in use");

  lmfs_buf_pool(bufs);

  fs_block_size = blocksize;
}

/*===========================================================================*
 *                            lmfs_set_blocksize                             *
 *===========================================================================*/
void lmfs_set_blocksize(int new_block_size, int major)
{
  int bufs;
  u32_t btotal, bfree, bused;

  cache_resize(new_block_size, MINBUFS);

  fs_blockstats(&btotal, &bfree, &bused);

  bufs = fs_bufs_heuristic(10, btotal, bfree,
    new_block_size, major);

  cache_resize(new_block_size, bufs);

  /* Decide whether to use seconday cache or not.
   * Only do this if
   *  - it's available, and
   *  - use of it hasn't been disabled for this fs, and
   *  - our main FS device isn't a memory device
   */

  vmcache = 0;
  if(vm_forgetblock(VM_BLOCKID_NONE) != ENOSYS &&
     may_use_vmcache && major != MEMORY_MAJOR) {
    vmcache = 1;
  }
}

/*===========================================================================*
 *                               lmfs_buf_pool                               *
 *===========================================================================*/
void lmfs_buf_pool(int new_nr_bufs)
{
/* Initialize the buffer pool. */
  register struct buf *bp;

  assert(new_nr_bufs >= MINBUFS);

  if(nr_bufs > 0) {
    assert(buf);
    (void) fs_sync();
    for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
      if(bp->data) {
        assert(bp->lmfs_bytes > 0);
        free_contig(bp->data, bp->lmfs_bytes);
      }
    }
  }

  if(buf)
    free(buf);

  if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
    panic("couldn't allocate buf list (%d)", new_nr_bufs);

  if(buf_hash)
    free(buf_hash);
  if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
    panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

  nr_bufs = new_nr_bufs;

  bufs_in_use = 0;
  front = &buf[0];
  rear = &buf[nr_bufs - 1];

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
    bp->lmfs_blocknr = NO_BLOCK;
    bp->lmfs_dev = NO_DEV;
    bp->lmfs_next = bp + 1;
    bp->lmfs_prev = bp - 1;
    bp->data = NULL;
    bp->lmfs_bytes = 0;
  }
  front->lmfs_prev = NULL;
  rear->lmfs_next = NULL;

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->lmfs_hash = bp->lmfs_next;
  buf_hash[0] = front;

  vm_forgetblocks();
}

int lmfs_bufs_in_use(void)
{
  return bufs_in_use;
}

int lmfs_nr_bufs(void)
{
  return nr_bufs;
}

void lmfs_flushall(void)
{
  struct buf *bp;
  for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if(bp->lmfs_dev != NO_DEV && bp->lmfs_dirt == BP_DIRTY)
      flushall(bp->lmfs_dev);
}

int lmfs_fs_block_size(void)
{
  return fs_block_size;
}

void lmfs_may_use_vmcache(int ok)
{
  may_use_vmcache = ok;
}

void lmfs_reset_rdwt_err(void)
{
  rdwt_err = OK;
}

int lmfs_rdwt_err(void)
{
  return rdwt_err;
}

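One design point worth noting in the code above: lmfs_rw_scattered() releases
every queued block itself on the READING path, whether the transfer succeeded
or not, so a caller builds a run of PREFETCH blocks and hands them off without
putting them afterwards. A hypothetical read-ahead helper built on just the
public calls might look like this (RA_BLOCKS and read_ahead() are invented;
READING/NO_DEV come from the usual MINIX headers):

#include <minix/libminixfs.h>

#define RA_BLOCKS 32   /* illustrative prefetch window */

static void read_ahead(dev_t dev, block_t base)
{
  struct buf *bufq[RA_BLOCKS];
  struct buf *bp;
  int i, n;

  n = 0;
  for (i = 0; i < RA_BLOCKS; i++) {
    bp = lmfs_get_block(dev, base + i, PREFETCH);
    if (lmfs_dev(bp) != NO_DEV) {
      /* Already valid (primary or VM cache hit): stop extending the run. */
      lmfs_put_block(bp, FULL_DATA_BLOCK);
      break;
    }
    bufq[n++] = bp;
  }
  if (n > 0)
    lmfs_rw_scattered(dev, bufq, n, READING);  /* releases the blocks */
}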
@@ -1,6 +1,6 @@
# Makefile for ext2 filesystem
PROG=   ext2
-SRCS=   balloc.c cache.c link.c \
+SRCS=   balloc.c link.c \
        mount.c misc.c open.c protect.c read.c \
        stadir.c table.c time.c utility.c \
        write.c ialloc.c inode.c main.c path.c \

@@ -210,7 +210,7 @@ struct inode *rip; /* used for preallocation */
  /* we preallocate bytes only */
  ASSERT(EXT2_PREALLOC_BLOCKS == sizeof(char)*CHAR_BIT);

- bit = setbyte(bp->b_bitmap, sp->s_blocks_per_group);
+ bit = setbyte(b_bitmap(bp), sp->s_blocks_per_group);
  if (bit != -1) {
    block = bit + sp->s_first_data_block +
      group * sp->s_blocks_per_group;

@@ -227,17 +227,17 @@ struct inode *rip; /* used for preallocation */
    rip->i_prealloc_index = 0;
    rip->i_prealloc_count = EXT2_PREALLOC_BLOCKS - 1;

-   bp->b_dirt = DIRTY; /* by setbyte */
+   lmfs_markdirty(bp);
    put_block(bp, MAP_BLOCK);

    gd->free_blocks_count -= EXT2_PREALLOC_BLOCKS;
    sp->s_free_blocks_count -= EXT2_PREALLOC_BLOCKS;
-   group_descriptors_dirty = DIRTY;
+   group_descriptors_dirty = 1;
    return block;
  }
  }

- bit = setbit(bp->b_bitmap, sp->s_blocks_per_group, word);
+ bit = setbit(b_bitmap(bp), sp->s_blocks_per_group, word);
  if (bit == -1) {
    if (word == 0) {
      panic("ext2: allocator failed to allocate a bit in bitmap\

@@ -251,12 +251,12 @@ struct inode *rip; /* used for preallocation */
  block = sp->s_first_data_block + group * sp->s_blocks_per_group + bit;
  check_block_number(block, sp, gd);

- bp->b_dirt = DIRTY; /* Now it's safe to mark it as dirty */
+ lmfs_markdirty(bp);
  put_block(bp, MAP_BLOCK);

  gd->free_blocks_count--;
  sp->s_free_blocks_count--;
- group_descriptors_dirty = DIRTY;
+ group_descriptors_dirty = 1;

  if (update_bsearch && block != -1 && block != NO_BLOCK) {
    /* We searched from the beginning, update bsearch. */

@@ -313,16 +313,16 @@ void free_block(struct super_block *sp, bit_t bit_returned)

  bp = get_block(sp->s_dev, gd->block_bitmap, NORMAL);

- if (unsetbit(bp->b_bitmap, bit))
+ if (unsetbit(b_bitmap(bp), bit))
    panic("Tried to free unused block", bit_returned);

- bp->b_dirt = DIRTY;
+ lmfs_markdirty(bp);
  put_block(bp, MAP_BLOCK);

  gd->free_blocks_count++;
  sp->s_free_blocks_count++;

- group_descriptors_dirty = DIRTY;
+ group_descriptors_dirty = 1;

  if (bit_returned < sp->s_bsearch)
    sp->s_bsearch = bit_returned;

@@ -27,26 +27,8 @@ union fsdata_u {
/* A block is free if b_dev == NO_DEV. */

-/* These defs make it possible to use to bp->b_data instead of bp->b.b__data */
-#define b_data   bp->b__data
-#define b_ind    bp->b__ind
-#define b_ino    bp->b__ino
-#define b_bitmap bp->b__bitmap
-
-#define BUFHASH(b) ((b) % nr_bufs)
-
-EXTERN struct buf *front;        /* points to least recently used free block */
-EXTERN struct buf *rear;         /* points to most recently used free block */
-EXTERN unsigned int bufs_in_use; /* # bufs currently in use (not on free list)*/
-
-/* When a block is released, the type of usage is passed to put_block(). */
-#define WRITE_IMMED        0100  /* block should be written to disk now */
-#define ONE_SHOT           0200  /* set if block not likely to be needed soon */
-
-#define INODE_BLOCK        0     /* inode block */
-#define DIRECTORY_BLOCK    1     /* directory block */
-#define INDIRECT_BLOCK     2     /* pointer block */
-#define MAP_BLOCK          3     /* bit map */
-#define FULL_DATA_BLOCK    5     /* data, fully used */
-#define PARTIAL_DATA_BLOCK 6     /* data, partly used*/
+#define b_data(bp)   ((union fsdata_u *) bp->data)->b__data
+#define b_ind(bp)    ((union fsdata_u *) bp->data)->b__ind
+#define b_bitmap(bp) ((union fsdata_u *) bp->data)->b__bitmap

#endif /* EXT2_BUF_H */

@@ -1,604 +0,0 @@
/* The file system maintains a buffer cache to reduce the number of disk
 * accesses needed. Whenever a read or write to the disk is done, a check is
 * first made to see if the block is in the cache. This file manages the
 * cache.
 *
 * The entry points into this file are:
 *   get_block:   request to fetch a block for reading or writing from cache
 *   put_block:   return a block previously requested with get_block
 *   invalidate:  remove all the cache blocks on some device
 *
 * Private functions:
 *   rw_block:    read or write a block from the disk itself
 *
 * Created (MFS based):
 *   February 2010 (Evgeniy Ivanov)
 */

#include "fs.h"
#include <minix/u64.h>
#include <minix/bdev.h>
#include <minix/libminixfs.h>
#include <stdlib.h>
#include <assert.h>
#include "buf.h"
#include "super.h"
#include "inode.h"

static void rm_lru(struct buf *bp);
static void rw_block(struct buf *, int);

int vmcache = 0; /* are we using vm's secondary cache? (initially not) */

static block_t super_start = 0, super_end = 0;

/*===========================================================================*
 *                                get_block                                  *
 *===========================================================================*/
struct buf *get_block(
  register dev_t dev,       /* on which device is the block? */
  register block_t block,   /* which block is wanted? */
  int only_search           /* if NO_READ, don't read, else act normal */
)
{
/* Check to see if the requested block is in the block cache. If so, return
 * a pointer to it. If not, evict some other block and fetch it (unless
 * 'only_search' is 1). All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block. If 'only_search' is
 * 1, the block being requested will be overwritten in its entirety, so it is
 * only necessary to see if it is in the cache; if it is not, any free buffer
 * will do. It is not necessary to actually read the block in from disk.
 * If 'only_search' is PREFETCH, the block need not be read from the disk,
 * and the device is not to be marked on the block, so callers can tell if
 * the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  static struct buf *bp, *prev_ptr;
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block). Do_read() can use
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped
   */
  if (dev != NO_DEV) {
    b = BUFHASH(block);
    bp = buf_hash[b];
    while (bp != NULL) {
      if (bp->b_blocknr == block && bp->b_dev == dev) {
        /* Block needed has been found. */
        if (bp->b_count == 0) rm_lru(bp);
        bp->b_count++;          /* record that block is in use */
        ASSERT(bp->b_bytes == fs_block_size);
        ASSERT(bp->b_dev == dev);
        ASSERT(bp->b_dev != NO_DEV);
        ASSERT(bp->bp);
        return(bp);
      } else {
        /* This block is not the one sought. */
        bp = bp->b_hash;        /* move to next block on hash chain */
      }
    }
  }

  /* Desired block is not on available chain. Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use", nr_bufs);

  if(bp->b_bytes < fs_block_size) {
    ASSERT(!bp->bp);
    ASSERT(bp->b_bytes == 0);
    if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
      ext2_debug("ext2: couldn't allocate a new block.\n");
      for(bp = front;
          bp && bp->b_bytes < fs_block_size; bp = bp->b_next)
        ;
      if(!bp) {
        panic("no buffer available");
      }
    } else {
      bp->b_bytes = fs_block_size;
    }
  }

  ASSERT(bp);
  ASSERT(bp->bp);
  ASSERT(bp->b_bytes == fs_block_size);
  ASSERT(bp->b_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->b_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
    buf_hash[b] = bp->b_hash;
  } else {
    /* The block just taken is not on the front of its hash chain. */
    while (prev_ptr->b_hash != NULL)
      if (prev_ptr->b_hash == bp) {
        prev_ptr->b_hash = bp->b_hash;  /* found it */
        break;
      } else {
        prev_ptr = prev_ptr->b_hash;    /* keep looking */
      }
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
    if (bp->b_dirt == DIRTY) flushall(bp->b_dev);

    /* Are we throwing out a block that contained something?
     * Give it to VM for the second-layer cache.
     */
    yieldid = make64(bp->b_dev, bp->b_blocknr);
    assert(bp->b_bytes == fs_block_size);
    bp->b_dev = NO_DEV;
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  bp->b_dev = dev;          /* fill in device number */
  bp->b_blocknr = block;    /* fill in block number */
  bp->b_count++;            /* record that block is being used */
  b = BUFHASH(bp->b_blocknr);
  bp->b_hash = buf_hash[b];

  buf_hash[b] = bp;         /* add to hash list */

  if(dev == NO_DEV) {
    if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
      vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
        bp->bp, fs_block_size);
    }
    return(bp);   /* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
    /* Block is not found in our cache, but we do want it
     * if it's in the vm cache.
     */
    if(vmcache) {
      /* If we can satisfy the PREFETCH or NORMAL request
       * from the vm cache, work is done.
       */
      if(vm_yield_block_get_block(yieldid, getid,
          bp->bp, fs_block_size) == OK) {
        return bp;
      }
    }
  }

  if(only_search == PREFETCH) {
    /* PREFETCH: don't do i/o. */
    bp->b_dev = NO_DEV;
  } else if (only_search == NORMAL) {
    rw_block(bp, READING);
  } else if(only_search == NO_READ) {
    /* we want this block, but its contents
     * will be overwritten. VM has to forget
     * about it.
     */
    if(vmcache) {
      vm_forgetblock(getid);
    }
  } else
    panic("unexpected only_search value: %d", only_search);

  assert(bp->bp);

  return(bp);     /* return the newly acquired block */
}

/*===========================================================================*
 *                                put_block                                  *
 *===========================================================================*/
void put_block(
  register struct buf *bp,  /* pointer to the buffer to be released */
  int block_type            /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
)
{
/* Return a block to the list of available blocks. Depending on 'block_type'
 * it may be put on the front or rear of the LRU chain. Blocks that are
 * expected to be needed again shortly (e.g., partially full data blocks)
 * go on the rear; blocks that are unlikely to be needed again shortly
 * (e.g., full data blocks) go on the front. Blocks whose loss can hurt
 * the integrity of the file system (e.g., inode blocks) are written to
 * disk immediately if they are dirty.
 */
  if (bp == NULL) return;   /* it is easier to check here than in caller */

  bp->b_count--;            /* there is one use fewer now */
  if (bp->b_count != 0) return;   /* block is still in use */

  bufs_in_use--;            /* one fewer block buffers in use */

  /* Put this block back on the LRU chain. If the ONE_SHOT bit is set in
   * 'block_type', the block is not likely to be needed again shortly, so put
   * it on the front of the LRU chain where it will be the first one to be
   * taken when a free buffer is needed later.
   */
  if (bp->b_dev == DEV_RAM || (block_type & ONE_SHOT)) {
    /* Block probably won't be needed quickly. Put it on front of chain.
     * It will be the next block to be evicted from the cache.
     */
    bp->b_prev = NULL;
    bp->b_next = front;
    if (front == NULL)
      rear = bp;            /* LRU chain was empty */
    else
      front->b_prev = bp;
    front = bp;
  }
  else {
    /* Block probably will be needed quickly. Put it on rear of chain.
     * It will not be evicted from the cache for a long time.
     */
    bp->b_prev = rear;
    bp->b_next = NULL;
    if (rear == NULL)
      front = bp;
    else
      rear->b_next = bp;
    rear = bp;
  }

  /* Some blocks are so important (e.g., inodes, indirect blocks) that they
   * should be written to the disk immediately to avoid messing up the file
   * system in the event of a crash.
   */
  if ((block_type & WRITE_IMMED) && bp->b_dirt==DIRTY && bp->b_dev != NO_DEV) {
    rw_block(bp, WRITING);
  }
}

/*===========================================================================*
 *                                rw_block                                   *
 *===========================================================================*/
static void rw_block(
  register struct buf *bp,  /* buffer pointer */
  int rw_flag               /* READING or WRITING */
)
{
/* Read or write a disk block. This is the only routine in which actual disk
 * I/O is invoked. If an error occurs, a message is printed here, but the error
 * is not reported to the caller. If the error occurred while purging a block
 * from the cache, it is not clear what the caller could do about it anyway.
 */
  int r, op_failed = 0;
  u64_t pos;
  dev_t dev;

  if ( (dev = bp->b_dev) != NO_DEV) {
    pos = mul64u(bp->b_blocknr, fs_block_size);
    if (rw_flag == READING)
      r = bdev_read(dev, pos, bp->b_data, fs_block_size,
        BDEV_NOFLAGS);
    else
      r = bdev_write(dev, pos, bp->b_data, fs_block_size,
        BDEV_NOFLAGS);
    if (r < 0) {
      printf("Ext2(%d) I/O error on device %d/%d, block %u\n",
        SELF_E, major(dev), minor(dev), bp->b_blocknr);
      op_failed = 1;
    } else if (r != (ssize_t) fs_block_size) {
      r = END_OF_FILE;
      op_failed = 1;
    }

    if (op_failed) {
      bp->b_dev = NO_DEV;   /* invalidate block */

      /* Report read errors to interested parties. */
      if (rw_flag == READING) rdwt_err = r;

    }
  }

  bp->b_dirt = CLEAN;
}

/*===========================================================================*
 *                               invalidate                                  *
 *===========================================================================*/
void invalidate(
  dev_t device      /* device whose blocks are to be purged */
)
{
/* Remove all the blocks belonging to some device from the cache. */

  register struct buf *bp;

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if (bp->b_dev == device) bp->b_dev = NO_DEV;

  vm_forgetblocks();
}

/*===========================================================================*
 *                                flushall                                   *
 *===========================================================================*/
void flushall(
  dev_t dev         /* device to flush */
)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf **dirty = NULL;   /* static so it isn't on stack */
  static int unsigned dirtylistsize = 0;
  int ndirty;

  if(dirtylistsize != nr_bufs) {
    if(dirtylistsize > 0) {
      assert(dirty != NULL);
      free(dirty);
    }
    if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
      panic("couldn't allocate dirty buf list");
    dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++)
    if (bp->b_dirt == DIRTY && bp->b_dev == dev) dirty[ndirty++] = bp;
  rw_scattered(dev, dirty, ndirty, WRITING);
}

/*===========================================================================*
 *                              rw_scattered                                 *
 *===========================================================================*/
void rw_scattered(
  dev_t dev,          /* major-minor device number */
  struct buf **bufq,  /* pointer to array of buffers */
  int bufqsize,       /* number of buffers */
  int rw_flag         /* READING or WRITING */
)
{
/* Read or write scattered data from a device. */

  register struct buf *bp;
  int gap;
  register int i;
  register iovec_t *iop;
  static iovec_t *iovec = NULL;
  u64_t pos;
  int j, r;

  STATICINIT(iovec, NR_IOREQS);
  assert(bufq != NULL);

  /* (Shell) sort buffers on b_blocknr. */
  gap = 1;
  do
    gap = 3 * gap + 1;
  while (gap <= bufqsize);
  while (gap != 1) {
    gap /= 3;
    for (j = gap; j < bufqsize; j++) {
      for (i = j - gap;
           i >= 0 && bufq[i]->b_blocknr > bufq[i + gap]->b_blocknr;
           i -= gap) {
        bp = bufq[i];
        bufq[i] = bufq[i + gap];
        bufq[i + gap] = bp;
      }
    }
  }

  /* Set up I/O vector and do I/O. The result of dev_io is OK if everything
   * went fine, otherwise the error code for the first failed transfer.
   */
  while (bufqsize > 0) {
    for (j = 0, iop = iovec; j < NR_IOREQS && j < bufqsize; j++, iop++) {
      bp = bufq[j];
      if (bp->b_blocknr != (block_t) bufq[0]->b_blocknr + j) break;
      iop->iov_addr = (vir_bytes) bp->b_data;
      iop->iov_size = (vir_bytes) fs_block_size;
    }
    pos = mul64u(bufq[0]->b_blocknr, fs_block_size);
    if (rw_flag == READING)
      r = bdev_gather(dev, pos, iovec, j, BDEV_NOFLAGS);
    else
      r = bdev_scatter(dev, pos, iovec, j, BDEV_NOFLAGS);

    /* Harvest the results. The driver may have returned an error, or it
     * may have done less than what we asked for.
     */
    if (r < 0) {
      printf("ext2: I/O error %d on device %d/%d, block %u\n",
        r, major(dev), minor(dev), bufq[0]->b_blocknr);
    }
    for (i = 0; i < j; i++) {
      bp = bufq[i];
      if (r < (ssize_t) fs_block_size) {
        /* Transfer failed. */
        if (i == 0) {
          bp->b_dev = NO_DEV;   /* invalidate block */
          vm_forgetblocks();
        }
        break;
      }
      if (rw_flag == READING) {
        bp->b_dev = dev;        /* validate block */
        put_block(bp, PARTIAL_DATA_BLOCK);
      } else {
        bp->b_dirt = CLEAN;
      }
      r -= fs_block_size;
    }
    bufq += i;
    bufqsize -= i;
    if (rw_flag == READING) {
      /* Don't bother reading more than the device is willing to
       * give at this time. Don't forget to release those extras.
       */
      while (bufqsize > 0) {
        put_block(*bufq++, PARTIAL_DATA_BLOCK);
        bufqsize--;
      }
    }
    if (rw_flag == WRITING && i == 0) {
      /* We're not making progress, this means we might keep
       * looping. Buffers remain dirty if un-written. Buffers are
       * lost if invalidate()d or LRU-removed while dirty. This
       * is better than keeping unwritable blocks around forever..
       */
      break;
    }
  }
}

/*===========================================================================*
 *                                  rm_lru                                   *
 *===========================================================================*/
static void rm_lru(
  struct buf *bp
)
{
/* Remove a block from its LRU chain. */
  struct buf *next_ptr, *prev_ptr;

  bufs_in_use++;
  next_ptr = bp->b_next;    /* successor on LRU chain */
  prev_ptr = bp->b_prev;    /* predecessor on LRU chain */
  if (prev_ptr != NULL)
    prev_ptr->b_next = next_ptr;
  else
    front = next_ptr;       /* this block was at front of chain */

  if (next_ptr != NULL)
    next_ptr->b_prev = prev_ptr;
  else
    rear = prev_ptr;        /* this block was at rear of chain */
}

/*===========================================================================*
 *                               cache_resize                                *
 *===========================================================================*/
static void cache_resize(unsigned int blocksize, unsigned int bufs)
{
  struct buf *bp;
  struct inode *rip;

#define MINBUFS 10
  assert(blocksize > 0);
  assert(bufs >= MINBUFS);

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
    if(bp->b_count != 0) panic("change blocksize with buffer in use");

  for (rip = &inode[0]; rip < &inode[NR_INODES]; rip++)
    if (rip->i_count > 0) panic("change blocksize with inode in use");

  buf_pool(bufs);

  fs_block_size = blocksize;
  super_start = SUPER_BLOCK_BYTES / fs_block_size;
  super_end = (SUPER_BLOCK_BYTES + _MIN_BLOCK_SIZE - 1) / fs_block_size;
}

/*===========================================================================*
 *                             bufs_heuristic                                *
 *===========================================================================*/
static int bufs_heuristic(struct super_block *sp)
{
  u32_t btotal, bfree;

  btotal = sp->s_blocks_count;
  bfree = sp->s_free_blocks_count;
  return fs_bufs_heuristic(MINBUFS, btotal, bfree,
    sp->s_block_size, major(sp->s_dev));
}

/*===========================================================================*
 *                             set_blocksize                                 *
 *===========================================================================*/
void set_blocksize(struct super_block *sp)
{
  int bufs;

  cache_resize(sp->s_block_size, MINBUFS);
  bufs = bufs_heuristic(sp);
  cache_resize(sp->s_block_size, bufs);

  /* Decide whether to use seconday cache or not.
   * Only do this if
   *  - it's available, and
   *  - use of it hasn't been disabled for this fs, and
   *  - our main FS device isn't a memory device
   */

  vmcache = 0;
  if(vm_forgetblock(VM_BLOCKID_NONE) != ENOSYS &&
     may_use_vmcache && major(sp->s_dev) != MEMORY_MAJOR) {
    vmcache = 1;
  }
}

/*===========================================================================*
 *                                buf_pool                                   *
 *===========================================================================*/
void buf_pool(int new_nr_bufs)
{
/* Initialize the buffer pool. */
  register struct buf *bp;

  assert(new_nr_bufs >= MINBUFS);

  if(nr_bufs > 0) {
    assert(buf);
    (void) fs_sync();
    for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
      if(bp->bp) {
        assert(bp->b_bytes > 0);
        free_contig(bp->bp, bp->b_bytes);
      }
    }
  }

  if(buf)
    free(buf);

  if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
    panic("couldn't allocate buf list (%d)", new_nr_bufs);

  if(buf_hash)
    free(buf_hash);
  if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
    panic("couldn't allocate buf hash list (%d)", new_nr_bufs);

  nr_bufs = new_nr_bufs;

  bufs_in_use = 0;
  front = &buf[0];
  rear = &buf[nr_bufs - 1];

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
    bp->b_blocknr = NO_BLOCK;
    bp->b_dev = NO_DEV;
    bp->b_next = bp + 1;
    bp->b_prev = bp - 1;
    bp->bp = NULL;
    bp->b_bytes = 0;
  }
  front->b_prev = NULL;
  rear->b_next = NULL;

  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->b_hash = bp->b_next;
  buf_hash[0] = front;

  vm_forgetblocks();
}

@@ -43,8 +43,8 @@
#define IGN_PERM 0
#define CHK_PERM 1

-#define CLEAN    0  /* disk and memory copies identical */
-#define DIRTY    1  /* disk and memory copies differ */
+#define IN_CLEAN 0  /* inode disk and memory copies identical */
+#define IN_DIRTY 1  /* inode disk and memory copies differ */
#define ATIME    002 /* set if atime field needs updating */
#define CTIME    004 /* set if ctime field needs updating */
#define MTIME    010 /* set if mtime field needs updating */

@@ -42,14 +42,6 @@ EXTERN char fs_dev_label[16]; /* Name of the device driver that is handled
EXTERN int unmountdone;
EXTERN int exitsignaled;

-/* our block size. */
-EXTERN unsigned int fs_block_size;
-
-/* Buffer cache. */
-EXTERN struct buf *buf;
-EXTERN struct buf **buf_hash;   /* the buffer hash table */
-EXTERN unsigned int nr_bufs;
-EXTERN int may_use_vmcache;
/* Little hack for syncing group descriptors. */
EXTERN int group_descriptors_dirty;

@@ -160,7 +160,7 @@ int is_dir; /* inode will be a directory if it is TRUE */
  ASSERT(gd->free_inodes_count);

  bp = get_block(sp->s_dev, gd->inode_bitmap, NORMAL);
- bit = setbit(bp->b_bitmap, sp->s_inodes_per_group, 0);
+ bit = setbit(b_bitmap(bp), sp->s_inodes_per_group, 0);
  ASSERT(bit != -1); /* group definitly contains free inode */

  inumber = group * sp->s_inodes_per_group + bit + 1;

@@ -179,7 +179,7 @@ int is_dir; /* inode will be a directory if it is TRUE */
    panic("ext2: allocator tryed to use reserved inode.\n");
  }

- bp->b_dirt = DIRTY;
+ lmfs_markdirty(bp);
  put_block(bp, MAP_BLOCK);

  gd->free_inodes_count--;

@@ -189,7 +189,7 @@ int is_dir; /* inode will be a directory if it is TRUE */
    sp->s_dirs_counter++;
  }

- group_descriptors_dirty = DIRTY;
+ group_descriptors_dirty = 1;

  /* Almost the same as previous 'group' ASSERT */
  ASSERT(inumber != NO_BIT);

@@ -228,10 +228,10 @@ static void free_inode_bit(struct super_block *sp, bit_t bit_returned,

  bp = get_block(sp->s_dev, gd->inode_bitmap, NORMAL);

- if (unsetbit(bp->b_bitmap, bit))
+ if (unsetbit(b_bitmap(bp), bit))
    panic("Tried to free unused inode", bit_returned);

- bp->b_dirt = DIRTY;
+ lmfs_markdirty(bp);
  put_block(bp, MAP_BLOCK);

  gd->free_inodes_count++;

@@ -242,7 +242,7 @@ static void free_inode_bit(struct super_block *sp, bit_t bit_returned,
    sp->s_dirs_counter--;
  }

- group_descriptors_dirty = DIRTY;
+ group_descriptors_dirty = 1;

  if (group < sp->s_igsearch)
    sp->s_igsearch = group;

@@ -471,5 +471,5 @@ static void wipe_inode(
    rip->i_block[i] = NO_BLOCK;
  rip->i_block[0] = NO_BLOCK;

- rip->i_dirt = DIRTY;
+ rip->i_dirt = IN_DIRTY;
}

@@ -246,12 +246,12 @@ void put_inode(
   */
  (void) truncate_inode(rip, (off_t) 0);
  /* free inode clears I_TYPE field, since it's used there */
- rip->i_dirt = DIRTY;
+ rip->i_dirt = IN_DIRTY;
  free_inode(rip);
  }

  rip->i_mountpoint = FALSE;
- if (rip->i_dirt == DIRTY) rw_inode(rip, WRITING);
+ if (rip->i_dirt == IN_DIRTY) rw_inode(rip, WRITING);

  discard_preallocated_blocks(rip); /* Return blocks to the filesystem */

@@ -335,20 +335,20 @@ void rw_inode(
  bp = get_block(rip->i_dev, b, NORMAL);

  offset &= (sp->s_block_size - 1);
- dip = (d_inode*) (bp->b_data + offset);
+ dip = (d_inode*) (b_data(bp) + offset);

  /* Do the read or write. */
  if (rw_flag == WRITING) {
    if (rip->i_update)
      update_times(rip);  /* times need updating */
    if (sp->s_rd_only == FALSE)
-     bp->b_dirt = DIRTY;
+     lmfs_markdirty(bp);
  }

  icopy(rip, dip, rw_flag, TRUE);

  put_block(bp, INODE_BLOCK);
- rip->i_dirt = CLEAN;
+ rip->i_dirt = IN_CLEAN;
}

@@ -103,7 +103,7 @@ int fs_link()
  if(r == OK) {
    rip->i_links_count++;
    rip->i_update |= CTIME;
-   rip->i_dirt = DIRTY;
+   rip->i_dirt = IN_DIRTY;
  }

  /* Done. Release both inodes. */

@@ -201,7 +201,7 @@ int fs_rdlink()
  } else {
    bp = get_block(rip->i_dev, b, NORMAL);
    if (bp != NULL) {
-     link_text = bp->b_data;
+     link_text = b_data(bp);
      r = OK;
    } else {
      r = EIO;

@@ -294,7 +294,7 @@ char file_name[NAME_MAX + 1]; /* name of file to be removed */
  if (r == OK) {
    rip->i_links_count--; /* entry deleted from parent's dir */
    rip->i_update |= CTIME;
-   rip->i_dirt = DIRTY;
+   rip->i_dirt = IN_DIRTY;
  }

  put_inode(rip);

@@ -502,7 +502,7 @@ int fs_rename()
  if(search_dir(old_ip, dot2, &numb, ENTER, IGN_PERM, I_DIRECTORY) == OK) {
    /* New link created. */
    new_dirp->i_links_count++;
-   new_dirp->i_dirt = DIRTY;
+   new_dirp->i_dirt = IN_DIRTY;
  }
  }

@@ -577,7 +577,7 @@ off_t newsize; /* inode must become this size */
  /* Next correct the inode size. */
  rip->i_size = newsize;
  rip->i_update |= CTIME | MTIME;
- rip->i_dirt = DIRTY;
+ rip->i_dirt = IN_DIRTY;

  return(OK);
}

@@ -650,7 +650,7 @@ off_t start, end; /* range of bytes to free (end uninclusive) */
  }

  rip->i_update |= CTIME | MTIME;
- rip->i_dirt = DIRTY;
+ rip->i_dirt = IN_DIRTY;

  return(OK);
}

@@ -726,7 +726,7 @@ off_t len;
  offset = pos % rip->i_sp->s_block_size;
  if (offset + len > rip->i_sp->s_block_size)
    panic("zeroblock_range: len too long", len);
- memset(bp->b_data + offset, 0, len);
- bp->b_dirt = DIRTY;
+ memset(b_data(bp) + offset, 0, len);
+ lmfs_markdirty(bp);
  put_block(bp, FULL_DATA_BLOCK);
}

@@ -142,7 +142,7 @@ static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
	if (!strcmp(env_argv[i], "-o"))
		optset_parse(optset_table, env_argv[++i]);
 
-  may_use_vmcache = 1;
+  lmfs_may_use_vmcache(1);
 
   /* Init inode table */
   for (i = 0; i < NR_INODES; ++i) {
@@ -155,8 +155,7 @@ static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
   SELF_E = getprocnr();
 
   /* just a small number before we find out the block size at mount time */
-  buf_pool(10);
-  fs_block_size = _MIN_BLOCK_SIZE;
+  lmfs_buf_pool(10);
 
   return(OK);
 }

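
(Illustrative sketch, not part of the commit: the client-side startup sequence after this change, built only from the lmfs calls used above; the helper name is hypothetical.)

#include <minix/libminixfs.h>

/* Enable the VM second-level cache and create a small bootstrap pool;
 * the pool is resized at mount time, once the real block size is known
 * and lmfs_set_blocksize() runs.
 */
static void init_cache_sketch(void)
{
  lmfs_may_use_vmcache(1);	/* opt in to VM's secondary cache */
  lmfs_buf_pool(10);		/* tiny pool until fs_readsuper() */
}
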
@@ -20,22 +20,17 @@ int fs_sync()
  * the block cache.
  */
   struct inode *rip;
-  struct buf *bp;
 
-  assert(nr_bufs > 0);
-  assert(buf);
+  assert(lmfs_nr_bufs() > 0);
 
   if (superblock->s_rd_only)
	return(OK); /* nothing to sync */
 
   /* Write all the dirty inodes to the disk. */
   for(rip = &inode[0]; rip < &inode[NR_INODES]; rip++)
-	if(rip->i_count > 0 && rip->i_dirt == DIRTY) rw_inode(rip, WRITING);
+	if(rip->i_count > 0 && rip->i_dirt == IN_DIRTY) rw_inode(rip, WRITING);
 
-  /* Write all the dirty blocks to the disk, one drive at a time. */
-  for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
-	if(bp->b_dev != NO_DEV && bp->b_dirt == DIRTY)
-		flushall(bp->b_dev);
+  lmfs_flushall();
 
   if (superblock->s_dev != NO_DEV) {
	superblock->s_wtime = clock_time();
@@ -58,8 +53,8 @@ int fs_flush()
 
   if(dev == fs_dev) return(EBUSY);
 
-  flushall(dev);
-  invalidate(dev);
+  lmfs_flushall();
+  lmfs_invalidate(dev);
 
   return(OK);
 }

@@ -114,7 +114,7 @@ int fs_readsuper()
   }
 
 
-  set_blocksize(superblock);
+  lmfs_set_blocksize(superblock->s_block_size, major(fs_dev));
 
   /* Get the root inode of the mounted file system. */
   if ( (root_ip = get_inode(fs_dev, ROOT_INODE)) == NULL) {

@@ -166,7 +166,7 @@ int fs_mkdir()
	/* Normal case.  It was possible to enter . and .. in the new dir. */
	rip->i_links_count++;	/* this accounts for . */
	ldirp->i_links_count++;	/* this accounts for .. */
-	ldirp->i_dirt = DIRTY;	/* mark parent's inode as dirty */
+	ldirp->i_dirt = IN_DIRTY;	/* mark parent's inode as dirty */
   } else {
	/* It was not possible to enter . or .. probably disk was full -
	 * links counts haven't been touched. */
@@ -174,7 +174,7 @@ int fs_mkdir()
		panic("Dir disappeared ", rip->i_num);
	rip->i_links_count--;	/* undo the increment done in new_node() */
   }
-  rip->i_dirt = DIRTY;		/* either way, i_links_count has changed */
+  rip->i_dirt = IN_DIRTY;	/* either way, i_links_count has changed */
 
   put_inode(ldirp);		/* return the inode of the parent dir */
   put_inode(rip);		/* return the inode of the newly made dir */
@@ -227,16 +227,16 @@ int fs_slink()
			   (cp_grant_id_t) fs_m_in.REQ_GRANT3,
			   (vir_bytes) 0, (vir_bytes) sip->i_block,
			   (vir_bytes) fs_m_in.REQ_MEM_SIZE);
-	sip->i_dirt = DIRTY;
+	sip->i_dirt = IN_DIRTY;
	link_target_buf = (char*) sip->i_block;
   } else {
	if ((bp = new_block(sip, (off_t) 0)) != NULL) {
		sys_safecopyfrom(VFS_PROC_NR,
				 (cp_grant_id_t) fs_m_in.REQ_GRANT3,
-				 (vir_bytes) 0, (vir_bytes) bp->b_data,
+				 (vir_bytes) 0, (vir_bytes) b_data(bp),
				 (vir_bytes) fs_m_in.REQ_MEM_SIZE);
-		bp->b_dirt = DIRTY;
-		link_target_buf = bp->b_data;
+		lmfs_markdirty(bp);
+		link_target_buf = b_data(bp);
	} else {
		r = err_code;
	}
@@ -326,7 +326,7 @@ static struct inode *new_node(struct inode *ldirp,
   if ((r=search_dir(ldirp, string, &rip->i_num, ENTER, IGN_PERM,
		    rip->i_mode & I_TYPE)) != OK) {
	rip->i_links_count--;	/* pity, have to free disk inode */
-	rip->i_dirt = DIRTY;	/* dirty inodes are written out */
+	rip->i_dirt = IN_DIRTY;	/* dirty inodes are written out */
	put_inode(rip);	/* this call frees the inode */
	err_code = r;
	return(NULL);

@@ -305,7 +305,7 @@ char *suffix;			/* current remaining path. Has to point in the
	if ((blink = read_map(rip, (off_t) 0)) == NO_BLOCK)
		return(EIO);
	bp = get_block(rip->i_dev, blink, NORMAL);
-	sp = bp->b_data;
+	sp = b_data(bp);
   } else {
	/* fast symlink, stored in inode */
	sp = (const char*) rip->i_block;
@@ -561,8 +561,8 @@ int ftype;		 /* used when ENTER and
	/* Search a directory block.
	 * Note, we set prev_dp at the end of the loop.
	 */
-	for (dp = (struct ext2_disk_dir_desc*) &bp->b_data;
-	     CUR_DISC_DIR_POS(dp, &bp->b_data) < ldir_ptr->i_sp->s_block_size;
+	for (dp = (struct ext2_disk_dir_desc*) &b_data(bp);
+	     CUR_DISC_DIR_POS(dp, &b_data(bp)) < ldir_ptr->i_sp->s_block_size;
	     dp = NEXT_DISC_DIR_DESC(dp) ) {
		/* Match occurs if string found. */
		if (flag != ENTER && dp->d_ino != NO_ENTRY) {
@@ -588,7 +588,7 @@ int ftype;		 /* used when ENTER and
				*((ino_t *) &dp->d_name[t]) = dp->d_ino;
			}
			dp->d_ino = NO_ENTRY;	/* erase entry */
-			bp->b_dirt = DIRTY;
+			lmfs_markdirty(bp);
 
			/* If we don't support HTree (directory index),
			 * which is fully compatible ext2 feature,
@@ -609,7 +609,7 @@ int ftype;		 /* used when ENTER and
					conv2(le_CPU, dp->d_rec_len);
			}
			ldir_ptr->i_update |= CTIME | MTIME;
-			ldir_ptr->i_dirt = DIRTY;
+			ldir_ptr->i_dirt = IN_DIRTY;
			/* Now we have cleared dentry, if it's not
			 * the first one, merge it with previous one.
			 * Since we assume, that existing dentry must be
@@ -652,7 +652,7 @@ int ftype;		 /* used when ENTER and
			dp->d_rec_len = conv2(le_CPU, new_slot_size);
			/* if we fail before writing real ino */
			dp->d_ino = NO_ENTRY;
-			bp->b_dirt = DIRTY;
+			lmfs_markdirty(bp);
			e_hit = TRUE;	/* we found a free slot */
			break;
		}
@@ -683,7 +683,7 @@ int ftype;		 /* used when ENTER and
	new_slots++;		/* increase directory size by 1 entry */
	if ( (bp = new_block(ldir_ptr, ldir_ptr->i_size)) == NULL)
		return(err_code);
-	dp = (struct ext2_disk_dir_desc*) &bp->b_data;
+	dp = (struct ext2_disk_dir_desc*) &b_data(bp);
	dp->d_rec_len = conv2(le_CPU, ldir_ptr->i_sp->s_block_size);
	dp->d_name_len = DIR_ENTRY_MAX_NAME_LEN(dp);	/* for failure */
	extended = 1;
@@ -711,10 +711,10 @@ int ftype;		 /* used when ENTER and
	else
		dp->d_file_type = EXT2_FT_UNKNOWN;
   }
-  bp->b_dirt = DIRTY;
+  lmfs_markdirty(bp);
   put_block(bp, DIRECTORY_BLOCK);
   ldir_ptr->i_update |= CTIME | MTIME;	/* mark mtime for update later */
-  ldir_ptr->i_dirt = DIRTY;
+  ldir_ptr->i_dirt = IN_DIRTY;
 
   if (new_slots == 1) {
	ldir_ptr->i_size += (off_t) conv2(le_CPU, dp->d_rec_len);

@@ -29,7 +29,7 @@ int fs_chmod()
   /* Now make the change. Clear setgid bit if file is not in caller's grp */
   rip->i_mode = (rip->i_mode & ~ALL_MODES) | (mode & ALL_MODES);
   rip->i_update |= CTIME;
-  rip->i_dirt = DIRTY;
+  rip->i_dirt = IN_DIRTY;
 
   /* Return full new mode to caller. */
   fs_m_out.RES_MODE = rip->i_mode;
@@ -58,7 +58,7 @@ int fs_chown()
	rip->i_gid = fs_m_in.REQ_GID;
	rip->i_mode &= ~(I_SET_UID_BIT | I_SET_GID_BIT);
	rip->i_update |= CTIME;
-	rip->i_dirt = DIRTY;
+	rip->i_dirt = IN_DIRTY;
   }
 
   /* Update caller on current mode, as it may have changed. */

@@ -1,6 +1,9 @@
 #ifndef EXT2_PROTO_H
 #define EXT2_PROTO_H
 
+#define get_block(d, n, t) lmfs_get_block(d, n, t)
+#define put_block(n, t) lmfs_put_block(n, t)
+
 /* Function prototypes. */
 
 /* Structs used in prototypes must be declared as such first. */
@@ -15,16 +18,6 @@ void discard_preallocated_blocks(struct inode *rip);
 block_t alloc_block(struct inode *rip, block_t goal);
 void free_block(struct super_block *sp, bit_t bit);
 
-/* cache.c */
-void buf_pool(int bufs);
-void flushall(dev_t dev);
-struct buf *get_block(dev_t dev, block_t block,int only_search);
-void invalidate(dev_t device);
-void put_block(struct buf *bp, int block_type);
-void set_blocksize(struct super_block *sp);
-void rw_scattered(dev_t dev, struct buf **bufq, int bufqsize, int
	rw_flag);
-
 /* ialloc.c */
 struct inode *alloc_inode(struct inode *parent, mode_t bits);
 void free_inode(struct inode *rip);

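
(Illustrative sketch, not part of the commit: with the two macros above, existing ext2 call sites compile unchanged while the work lands in libminixfs; the helper below is hypothetical.)

static void touch_block_sketch(dev_t dev, block_t b)
{
  struct buf *bp;

  bp = get_block(dev, b, NORMAL);	/* expands to lmfs_get_block(dev, b, NORMAL) */
  b_data(bp)[0] = 0;			/* FS code touches only the data portion */
  lmfs_markdirty(bp);
  put_block(bp, FULL_DATA_BLOCK);	/* expands to lmfs_put_block(bp, FULL_DATA_BLOCK) */
}
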
@@ -125,7 +125,7 @@ int fs_readwrite(void)
   if (r == OK) {
	if (rw_flag == READING) rip->i_update |= ATIME;
	if (rw_flag == WRITING) rip->i_update |= CTIME | MTIME;
-	rip->i_dirt = DIRTY;		/* inode is thus now dirty */
+	rip->i_dirt = IN_DIRTY;		/* inode is thus now dirty */
   }
 
   fs_m_out.RES_NBYTES = cum_io;
@@ -270,12 +270,12 @@ int *completed;			/* number of bytes copied */
   if (rw_flag == READING) {
	/* Copy a chunk from the block buffer to user space. */
	r = sys_safecopyto(VFS_PROC_NR, gid, (vir_bytes) buf_off,
-			   (vir_bytes) (bp->b_data+off), (size_t) chunk);
+			   (vir_bytes) (b_data(bp)+off), (size_t) chunk);
   } else {
	/* Copy a chunk from user space to the block buffer. */
	r = sys_safecopyfrom(VFS_PROC_NR, gid, (vir_bytes) buf_off,
-			     (vir_bytes) (bp->b_data+off), (size_t) chunk);
-	bp->b_dirt = DIRTY;
+			     (vir_bytes) (b_data(bp)+off), (size_t) chunk);
+	lmfs_markdirty(bp);
   }
 
   n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
@@ -338,8 +338,8 @@ off_t position;			/* position in file whose blk wanted */
	b = rip->i_block[EXT2_TIND_BLOCK];
	if (b == NO_BLOCK) return(NO_BLOCK);
	bp = get_block(rip->i_dev, b, NORMAL);	/* get triple ind block */
-	ASSERT(bp->b_dev != NO_DEV);
-	ASSERT(bp->b_dev == rip->i_dev);
+	ASSERT(lmfs_dev(bp) != NO_DEV);
+	ASSERT(lmfs_dev(bp) == rip->i_dev);
	excess = block_pos - triple_ind_s;
	index = excess / addr_in_block2;
	b = rd_indir(bp, index);	/* num of double ind block */
@@ -348,8 +348,8 @@ off_t position;			/* position in file whose blk wanted */
   }
   if (b == NO_BLOCK) return(NO_BLOCK);
   bp = get_block(rip->i_dev, b, NORMAL);	/* get double indirect block */
-  ASSERT(bp->b_dev != NO_DEV);
-  ASSERT(bp->b_dev == rip->i_dev);
+  ASSERT(lmfs_dev(bp) != NO_DEV);
+  ASSERT(lmfs_dev(bp) == rip->i_dev);
   index = excess / addr_in_block;
   b = rd_indir(bp, index);	/* num of single ind block */
   put_block(bp, INDIRECT_BLOCK);	/* release double ind block */
@@ -357,8 +357,8 @@ off_t position;			/* position in file whose blk wanted */
   }
   if (b == NO_BLOCK) return(NO_BLOCK);
   bp = get_block(rip->i_dev, b, NORMAL);
-  ASSERT(bp->b_dev != NO_DEV);
-  ASSERT(bp->b_dev == rip->i_dev);
+  ASSERT(lmfs_dev(bp) != NO_DEV);
+  ASSERT(lmfs_dev(bp) == rip->i_dev);
   b = rd_indir(bp, index);
   put_block(bp, INDIRECT_BLOCK);	/* release single ind block */
 
@@ -376,7 +376,7 @@ int index;			/* index into *bp */
   if (bp == NULL)
	panic("rd_indir() on NULL");
   /* TODO: use conv call */
-  return conv4(le_CPU, bp->b_ind[index]);
+  return conv4(le_CPU, b_ind(bp)[index]);
 }
 
 
@@ -424,6 +424,7 @@ unsigned bytes_ahead;		/* bytes beyond position for immediate use */
  */
 /* Minimum number of blocks to prefetch. */
 # define BLOCKS_MINIMUM		(nr_bufs < 50 ? 18 : 32)
+  int nr_bufs = lmfs_nr_bufs();
   int block_spec, read_q_size;
   unsigned int blocks_ahead, fragment, block_size;
   block_t block, blocks_left;
@@ -460,7 +461,7 @@ unsigned bytes_ahead;		/* bytes beyond position for immediate use */
   block = baseblock;
   bp = get_block(dev, block, PREFETCH);
   assert(bp != NULL);
-  if (bp->b_dev != NO_DEV) return(bp);
+  if (lmfs_dev(bp) != NO_DEV) return(bp);
 
   /* The best guess for the number of blocks to prefetch:  A lot.
    * It is impossible to tell what the device looks like, so we don't even
@@ -523,18 +524,18 @@ unsigned bytes_ahead;		/* bytes beyond position for immediate use */
	if (--blocks_ahead == 0) break;
 
	/* Don't trash the cache, leave 4 free. */
-	if (bufs_in_use >= nr_bufs - 4) break;
+	if (lmfs_bufs_in_use() >= nr_bufs - 4) break;
 
	block++;
 
	bp = get_block(dev, block, PREFETCH);
-	if (bp->b_dev != NO_DEV) {
+	if (lmfs_dev(bp) != NO_DEV) {
		/* Oops, block already in the cache, get out. */
		put_block(bp, FULL_DATA_BLOCK);
		break;
	}
   }
-  rw_scattered(dev, read_q, read_q_size, READING);
+  lmfs_rw_scattered(dev, read_q, read_q_size, READING);
   return(get_block(dev, baseblock, NORMAL));
 }
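
(Illustrative sketch, not part of the commit: the prefetch protocol the hunk above relies on, reduced to its core; the function is hypothetical and the queue handling is simplified.) lmfs_get_block(.., PREFETCH) returns a buffer without doing I/O and without marking a device on it, so lmfs_dev(bp) == NO_DEV identifies blocks that still need reading.

void prefetch_sketch(dev_t dev, block_t base, int nblocks)
{
  struct buf *read_q[NR_IOREQS];
  int q = 0;
  block_t b;

  for (b = base; b < base + nblocks && q < NR_IOREQS; b++) {
	struct buf *bp = lmfs_get_block(dev, b, PREFETCH);
	if (lmfs_dev(bp) != NO_DEV) {		/* already cached */
		lmfs_put_block(bp, FULL_DATA_BLOCK);
		break;
	}
	read_q[q++] = bp;
  }
  lmfs_rw_scattered(dev, read_q, q, READING);	/* one scattered read */
}
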
@@ -592,18 +593,18 @@ int fs_getdents(void)
	assert(bp != NULL);
 
	/* Search a directory block. */
-	d_desc = (struct ext2_disk_dir_desc*) &bp->b_data;
+	d_desc = (struct ext2_disk_dir_desc*) &b_data(bp);
 
	/* we need to seek to entry at off bytes.
	 * when NEXT_DISC_DIR_POS == block_size it's last dentry.
	 */
	for (; temp_pos + conv2(le_CPU, d_desc->d_rec_len) <= pos
-	       && NEXT_DISC_DIR_POS(d_desc, &bp->b_data) < block_size;
+	       && NEXT_DISC_DIR_POS(d_desc, &b_data(bp)) < block_size;
	       d_desc = NEXT_DISC_DIR_DESC(d_desc)) {
		temp_pos += conv2(le_CPU, d_desc->d_rec_len);
	}
 
-	for (; CUR_DISC_DIR_POS(d_desc, &bp->b_data) < block_size;
+	for (; CUR_DISC_DIR_POS(d_desc, &b_data(bp)) < block_size;
	     d_desc = NEXT_DISC_DIR_DESC(d_desc)) {
		if (d_desc->d_ino == 0)
			continue; /* Entry is not in use */
@@ -622,7 +623,7 @@ int fs_getdents(void)
			reclen += sizeof(long) - o;
 
		/* Need the position of this entry in the directory */
-		ent_pos = block_pos + ((char *)d_desc - bp->b_data);
+		ent_pos = block_pos + ((char *)d_desc - b_data(bp));
 
		if (userbuf_off + tmpbuf_off + reclen >= size) {
			/* The user has no space for one more record */
@@ -680,7 +681,7 @@ int fs_getdents(void)
	fs_m_out.RES_NBYTES = userbuf_off;
	fs_m_out.RES_SEEK_POS_LO = new_pos;
	rip->i_update |= ATIME;
-	rip->i_dirt = DIRTY;
+	rip->i_dirt = IN_DIRTY;
	r = OK;
   }

@@ -128,3 +128,16 @@ int fs_statvfs()
 
   return(r);
 }
+
+/*===========================================================================*
+ *				blockstats				     *
+ *===========================================================================*/
+void fs_blockstats(u32_t *blocks, u32_t *free, u32_t *used)
+{
+	struct super_block *sp = get_super(fs_dev);
+
+	*blocks = sp->s_blocks_count;
+	*free = sp->s_free_blocks_count;
+	*used = *blocks - *free;
+}
+
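
(Illustrative sketch, not part of the commit: fs_blockstats() is the callback through which libminixfs learns filesystem usage; a plausible pairing with fs_bufs_heuristic() from the new interface looks like this. The wrapper is hypothetical.)

static u32_t cache_size_sketch(int blocksize, dev_t dev)
{
  u32_t btotal, bfree, bused;

  fs_blockstats(&btotal, &bfree, &bused);	/* ask the client FS */
  return fs_bufs_heuristic(10 /* minbufs */, btotal, bfree,
	blocksize, major(dev));
}
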
@@ -55,7 +55,7 @@ unsigned int get_block_size(dev_t dev)
 {
   if (dev == NO_DEV)
	panic("request for block size of NO_DEV");
-  return(fs_block_size);
+  return(lmfs_fs_block_size());
 }
 
 static struct group_desc *ondisk_group_descs;
@@ -235,7 +235,7 @@ struct super_block *sp;		/* pointer to a superblock */
   if (r != SUPER_SIZE_D)
	printf("ext2: Warning, failed to write superblock to the disk!\n");
 
-  if (group_descriptors_dirty == DIRTY) {
+  if (group_descriptors_dirty) {
	/* Locate the appropriate super_block. */
	gd_size = sp->s_gdb_count * sp->s_block_size;
 
@@ -253,7 +253,7 @@ struct super_block *sp;		/* pointer to a superblock */
	if (r != (ssize_t) gd_size) {
		printf("Can not write group descriptors\n");
	}
-	group_descriptors_dirty = CLEAN;
+	group_descriptors_dirty = 0;
   }
 }

@@ -28,7 +28,7 @@ int fs_utime()
	rip->i_atime = fs_m_in.REQ_ACTIME;
	rip->i_mtime = fs_m_in.REQ_MODTIME;
	rip->i_update = CTIME;	/* discard any stale ATIME and MTIME flags */
-	rip->i_dirt = DIRTY;
+	rip->i_dirt = IN_DIRTY;
   }
 
   put_inode(rip);

@@ -1,6 +1,8 @@
 #ifndef EXT2_TYPE_H
 #define EXT2_TYPE_H
 
+#include <minix/libminixfs.h>
+
 /* On the disk all attributes are stored in little endian format.
  * Inode structure was taken from linux/include/linux/ext2_fs.h.
  */
@@ -81,23 +83,6 @@ struct ext2_disk_dir_desc {
 /* Return next dentry's position in block */
 #define NEXT_DISC_DIR_POS(cur_desc, base) (cur_desc->d_rec_len +\
				CUR_DISC_DIR_POS(cur_desc, base))
-
-struct buf {
-  /* Data portion of the buffer. */
-  union fsdata_u *bp;
-
-  /* Header portion of the buffer. */
-  struct buf *b_next;		/* used to link all free bufs in a chain */
-  struct buf *b_prev;		/* used to link all free bufs the other way */
-  struct buf *b_hash;		/* used to link bufs on hash chains */
-  block_t b_blocknr;		/* block number of its (minor) device */
-  dev_t b_dev;			/* major | minor device where block resides */
-  char b_dirt;			/* CLEAN or DIRTY */
-  char b_count;			/* number of users of this buffer */
-  unsigned int b_bytes;		/* Number of bytes allocated in bp */
-};
-
-
 /* Structure with options affecting global behavior. */
 struct opt {
	int use_orlov;		/* Bool: Use Orlov allocator */

@@ -107,9 +107,9 @@ void sanitycheck(char *file, int line)
   MYASSERT(SELF_E > 0);
   if(superblock->s_dev != NO_DEV) {
	MYASSERT(superblock->s_dev == fs_dev);
-	MYASSERT(superblock->s_block_size == fs_block_size);
+	MYASSERT(superblock->s_block_size == lmfs_fs_block_size());
   } else {
-	MYASSERT(_MIN_BLOCK_SIZE == fs_block_size);
+	MYASSERT(_MIN_BLOCK_SIZE == lmfs_fs_block_size());
   }
 }

@@ -62,7 +62,7 @@ int op;				/* special actions */
   }
 
   block_pos = position / rip->i_sp->s_block_size; /* relative blk # in file */
-  rip->i_dirt = DIRTY;		/* inode will be changed */
+  rip->i_dirt = IN_DIRTY;	/* inode will be changed */
 
   /* Is 'position' to be found in the inode itself? */
   if (block_pos < EXT2_NDIR_BLOCKS) {
@@ -115,7 +115,7 @@ int op;				/* special actions */
	bp_tindir = get_block(rip->i_dev, b3, (new_triple ? NO_READ : NORMAL));
	if (new_triple) {
		zero_block(bp_tindir);
-		bp_tindir->b_dirt = DIRTY;
+		lmfs_markdirty(bp_tindir);
	}
	excess = block_pos - triple_ind_s;
	index3 = excess / addr_in_block2;
@@ -135,7 +135,7 @@ int op;				/* special actions */
		}
		if (triple) {
			wr_indir(bp_tindir, index3, b2);  /* update triple indir */
-			bp_tindir->b_dirt = DIRTY;
+			lmfs_markdirty(bp_tindir);
		} else {
			rip->i_block[EXT2_DIND_BLOCK] = b2;
		}
@@ -156,7 +156,7 @@ int op;				/* special actions */
	bp_dindir = get_block(rip->i_dev, b2, (new_dbl ? NO_READ : NORMAL));
	if (new_dbl) {
		zero_block(bp_dindir);
-		bp_dindir->b_dirt = DIRTY;
+		lmfs_markdirty(bp_dindir);
	}
	index2 = excess / addr_in_block;
	b1 = rd_indir(bp_dindir, index2);
@@ -181,7 +181,7 @@ int op;				/* special actions */
			rip->i_block[EXT2_NDIR_BLOCKS] = b1; /* update inode single indirect */
		} else {
			wr_indir(bp_dindir, index2, b1);  /* update dbl indir */
-			bp_dindir->b_dirt = DIRTY;
+			lmfs_markdirty(bp_dindir);
		}
		rip->i_blocks += rip->i_sp->s_sectors_in_block;
		new_ind = TRUE;
@@ -216,7 +216,7 @@ int op;				/* special actions */
			rip->i_block[EXT2_NDIR_BLOCKS] = b1;
		} else {
			wr_indir(bp_dindir, index2, b1);
-			bp_dindir->b_dirt = DIRTY;
+			lmfs_markdirty(bp_dindir);
		}
	}
   } else {
@@ -224,7 +224,10 @@ int op;				/* special actions */
		rip->i_blocks += rip->i_sp->s_sectors_in_block;
	}
	/* b1 equals NO_BLOCK only when we are freeing up the indirect block. */
-	bp->b_dirt = (b1 == NO_BLOCK) ? CLEAN : DIRTY;;
+	if(b1 == NO_BLOCK)
+		lmfs_markclean(bp);
+	else
+		lmfs_markdirty(bp);
	put_block(bp, INDIRECT_BLOCK);
   }
 
@@ -234,13 +237,13 @@ int op;				/* special actions */
    */
   if (b1 == NO_BLOCK && !single && b2 != NO_BLOCK &&
       empty_indir(bp_dindir, rip->i_sp)) {
-	bp_dindir->b_dirt = CLEAN;
+	lmfs_markclean(bp_dindir);
	free_block(rip->i_sp, b2);
	rip->i_blocks -= rip->i_sp->s_sectors_in_block;
	b2 = NO_BLOCK;
	if (triple) {
		wr_indir(bp_tindir, index3, b2);  /* update triple indir */
-		bp_tindir->b_dirt = DIRTY;
+		lmfs_markdirty(bp_tindir);
	} else {
		rip->i_block[EXT2_DIND_BLOCK] = b2;
	}
@@ -251,7 +254,7 @@ int op;				/* special actions */
    */
   if (b2 == NO_BLOCK && triple && b3 != NO_BLOCK &&
       empty_indir(bp_tindir, rip->i_sp)) {
-	bp_tindir->b_dirt = CLEAN;
+	lmfs_markclean(bp_tindir);
	free_block(rip->i_sp, b3);
	rip->i_blocks -= rip->i_sp->s_sectors_in_block;
	rip->i_block[EXT2_TIND_BLOCK] = NO_BLOCK;
@@ -278,7 +281,7 @@ block_t block;			/* block to write */
	panic("wr_indir() on NULL");
 
   /* write a block into an indirect block */
-  bp->b_ind[index] = conv4(le_CPU, block);
+  b_ind(bp)[index] = conv4(le_CPU, block);
 }
 
 
@@ -295,7 +298,7 @@ struct super_block *sb;		/* superblock of device block resides on */
   long addr_in_block = sb->s_block_size/4; /* 4 bytes per addr */
   int i;
   for(i = 0; i < addr_in_block; i++)
-	if(bp->b_ind[i] != NO_BLOCK)
+	if(b_ind(bp)[i] != NO_BLOCK)
		return(0);
   return(1);
 }
@@ -368,8 +371,8 @@ void zero_block(bp)
 register struct buf *bp;	/* pointer to buffer to zero */
 {
 /* Zero a block. */
-  ASSERT(bp->b_bytes > 0);
-  ASSERT(bp->bp);
-  memset(bp->b_data, 0, (size_t) bp->b_bytes);
-  bp->b_dirt = DIRTY;
+  ASSERT(lmfs_bytes(bp) > 0);
+  ASSERT(bp->data);
+  memset(b_data(bp), 0, (size_t) lmfs_bytes(bp));
+  lmfs_markdirty(bp);
 }
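
(Illustrative sketch, not part of the commit: the pattern the write.c hunks converge on - update a slot in an indirect block via b_ind() and record the state change through the cache library instead of assigning b_dirt. The helper is hypothetical.)

static void update_indir_sketch(struct buf *bp, int index, block_t b)
{
  b_ind(bp)[index] = conv4(le_CPU, b);	/* write slot, little endian */
  if (b == NO_BLOCK)
	lmfs_markclean(bp);	/* freeing: don't flush a stale block */
  else
	lmfs_markdirty(bp);
}
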
@@ -1,10 +1,10 @@
 # Makefile for ISO9660 fs
 PROG=	isofs
 SRCS=	main.c table.c mount.c super.c inode.c \
-	utility.c misc.c path.c read.c stadir.c cache.c
+	utility.c misc.c path.c read.c stadir.c
 
 DPADD+=	${LIBBDEV} ${LIBSYS}
-LDADD+=	-lbdev -lsys -lc
+LDADD+=	-lbdev -lsys -lc -lminixfs
 
 MAN=

@@ -1,10 +1,6 @@
 #include <dirent.h>
 
-struct buf {
-  char b_data[_MAX_BLOCK_SIZE];	/* ordinary user data */
-  block_t b_blocknr;		/* block number of its (minor) device */
-  char b_count;			/* number of users of this buffer */
-} buf[NR_BUFS];
+#define b_data(bp) ((char *) (bp->data))
 
 /* A block is free if b_dev == NO_DEV. */

@@ -1,114 +0,0 @@
-/* The file system maintains a buffer cache to reduce the number of disk
- * accesses needed. Whenever a read or write to the disk is done, a check is
- * first made to see if the block is in the cache. This file manages the
- * cache.
- *
- * The entry points into this file are:
- *   get_block:   request to fetch a block for reading or writing from cache
- *   put_block:   return a block previously requested with get_block
- *
- * Private functions:
- *   read_block:  read physically the block
- */
-
-#include "inc.h"
-#include <minix/com.h>
-#include <minix/u64.h>
-#include <minix/bdev.h>
-#include "buf.h"
-
-static int read_block(struct buf *);
-
-struct buf *bp_to_pickup = buf; /* This is a pointer to the next node in the
-                                 * buffer cache to pick up*/
-
-
-/*===========================================================================*
- *				get_block				     *
- *===========================================================================*/
-struct buf *get_block(block)
-register block_t block;		/* which block is wanted? */
-{
-  register struct buf *bp, *free_bp;
-
-  free_bp = NULL;
-
-  /* Find if the block is already loaded */
-  for (bp = &buf[0]; bp < &buf[NR_BUFS]; bp++)
-	if (bp->b_blocknr == block) {
-		/* Block found. Increment count and return it */
-		bp->b_count++;
-		return bp;
-	} else
-		if (bp == bp_to_pickup) {
-			if (bp->b_count == 0)
-				free_bp = bp;
-			else /* Increment the node to pickup */
-				if (bp_to_pickup < &buf[NR_BUFS] - 1)
-					bp_to_pickup++;
-				else
-					bp_to_pickup = buf;
-		}
-
-  if (free_bp == NULL &&
-      bp_to_pickup == buf &&
-      bp_to_pickup->b_count == 0)
-	free_bp = bp_to_pickup;
-
-  if (free_bp != NULL) {
-	/* Set fields of data structure */
-	free_bp->b_blocknr = block;
-	if (read_block(free_bp) != OK) return NULL;
-	free_bp->b_count = 1;
-
-	if (bp_to_pickup < &buf[NR_BUFS] - 1)
-		bp_to_pickup++;
-	else
-		bp_to_pickup = buf;
-
-	return free_bp;
-  } else {
-	/* No free blocks. Return NULL */
-	return NULL;
-  }
-}
-
-/*===========================================================================*
- *				put_block				     *
- *===========================================================================*/
-void put_block(bp)
-register struct buf *bp;	/* pointer to the buffer to be released */
-{
-  if (bp == NULL) return;	/* it is easier to check here than in caller */
-
-  bp->b_count--;	/* there is one use fewer now */
-}
-
-/*===========================================================================*
- *				read_block				     *
- *===========================================================================*/
-static int read_block(bp)
-register struct buf *bp;	/* buffer pointer */
-{
-  int r;
-  u64_t pos;
-  vir_bytes block_size;
-
-  block_size = v_pri.logical_block_size_l; /* The block size is indicated by
-                                            * the superblock */
-
-
-  pos = mul64u(bp->b_blocknr, block_size); /* get absolute position */
-  r = bdev_read(fs_dev, pos, bp->b_data, block_size, BDEV_NOFLAGS);
-  if (r != (ssize_t) block_size) {
-	if (r == OK) r = END_OF_FILE;
-	else printf("ISOFS(%d) I/O error on device %d/%d, block %u\n",
-		SELF_E, (fs_dev>>MAJOR)&BYTE, (fs_dev>>MINOR)&BYTE,
-		bp->b_blocknr);
-
-	rdwt_err = r;
-	return EINVAL;
-  }
-
-  return OK;
-}

@@ -244,7 +244,7 @@ u32_t address;
	return(NULL);
 
   /* Fill the dir record with the data read from the device */
-  create_dir_record(dir,bp->b_data + offset, address);
+  create_dir_record(dir,b_data(bp) + offset, address);
 
   /* In case the file is composed of more file sections, load also the
    * next section into the structure */
@@ -253,7 +253,7 @@ u32_t address;
	new_address = address + dir->length;
	while (new_pos < block_size) {
		dir_next = get_free_dir_record();
-		create_dir_record(dir_next, bp->b_data + new_pos, new_address);
+		create_dir_record(dir_next, b_data(bp) + new_pos, new_address);
 
		if (dir_next->length > 0) {
			strncpy(name,dir_next->file_id,dir_next->length_file_id);

@@ -125,7 +125,7 @@ int search_dir(ldir_ptr,string,numb)
	  return EINVAL;
	}
 
-	if (create_dir_record(dir_tmp,bp->b_data + pos,
+	if (create_dir_record(dir_tmp,b_data(bp) + pos,
		ldir_ptr->loc_extent_l*v_pri.logical_block_size_l + pos) == EINVAL)
	  return EINVAL;
 
@@ -160,7 +160,7 @@ int search_dir(ldir_ptr,string,numb)
 
	  if (dir_tmp->ext_attr_rec_length != 0) {
	    dir_tmp->ext_attr = get_free_ext_attr();
-	    create_ext_attr(dir_tmp->ext_attr,bp->b_data);
+	    create_ext_attr(dir_tmp->ext_attr,b_data(bp));
	  }
 
	  *numb = ID_DIR_RECORD(dir_tmp);

@@ -4,15 +4,15 @@ struct dir_record;
 struct ext_attr_rec;
 struct iso9660_vd_pri;
 
+#include <minix/libminixfs.h>
+
+#define get_block(n) lmfs_get_block(fs_dev, n, NORMAL)
+#define put_block(n) lmfs_put_block(n, FULL_DATA_BLOCK)
+
 /* main.c */
 int main(void);
 void reply(int who, message *m_out);
 
-/* cache.c */
-struct buf *get_block(block_t block);
-void put_block(struct buf *bp);
-
 /* inode.c */
 int create_dir_record(struct dir_record *dir, char *buffer, u32_t
	address);
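
(Illustrative sketch, not part of the commit: isofs is read-only, so its macros can fix the device, read mode and block type once; a hypothetical call site then reads.)

static void read_block_sketch(block_t b)
{
  struct buf *bp;

  bp = get_block(b);	/* lmfs_get_block(fs_dev, b, NORMAL) */
  /* ... parse b_data(bp) ... */
  put_block(bp);	/* lmfs_put_block(bp, FULL_DATA_BLOCK) */
}
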
@@ -181,7 +181,7 @@ int fs_getdents(void) {
 
   while (block_pos < block_size) {
	dir_tmp = get_free_dir_record();
-	create_dir_record(dir_tmp,bp->b_data + block_pos,
+	create_dir_record(dir_tmp,b_data(bp) + block_pos,
			  block*block_size + block_pos);
	if (dir_tmp->length == 0) { /* EOF. I exit and return 0s */
		block_pos = block_size;
@@ -241,7 +241,7 @@ int fs_getdents(void) {
		/* The standard data structure is created using the
		 * data in the buffer. */
		dirp = (struct dirent *) &getdents_buf[tmpbuf_offset];
-		dirp->d_ino = (ino_t)(bp->b_data + block_pos);
+		dirp->d_ino = (ino_t)(b_data(bp) + block_pos);
		dirp->d_off= cur_pos;
		dirp->d_reclen= reclen;
		memcpy(dirp->d_name, name, len);
@@ -328,7 +328,7 @@ int *completed;		/* number of bytes copied */
	}
 
	r = sys_safecopyto(VFS_PROC_NR, gid, buf_off,
-			   (vir_bytes) (bp->b_data+off), (phys_bytes) chunk);
+			   (vir_bytes) (b_data(bp)+off), (phys_bytes) chunk);
 
	put_block(bp);

@@ -138,3 +138,13 @@ int fs_statvfs()
 
   return(r);
 }
+
+/*===========================================================================*
+ *				blockstats				     *
+ *===========================================================================*/
+void fs_blockstats(u32_t *blocks, u32_t *free, u32_t *used)
+{
+	*used = *blocks = v_pri.volume_space_size_l;
+	*free = 0;
+}
+
@@ -38,29 +38,13 @@ union fsdata_u {
 
 
 /* These defs make it possible to use to bp->b_data instead of bp->b.b__data */
-#define b_data   bp->b__data
-#define b_dir    bp->b__dir
-#define b_v1_ind bp->b__v1_ind
-#define b_v2_ind bp->b__v2_ind
-#define b_v1_ino bp->b__v1_ino
-#define b_v2_ino bp->b__v2_ino
-#define b_bitmap bp->b__bitmap
-
-#define BUFHASH(b) ((b) % nr_bufs)
-
-EXTERN struct buf *front;	/* points to least recently used free block */
-EXTERN struct buf *rear;	/* points to most recently used free block */
-EXTERN unsigned int bufs_in_use;/* # bufs currently in use (not on free list)*/
-
-/* When a block is released, the type of usage is passed to put_block(). */
-#define ONE_SHOT 0200	/* set if block not likely to be needed soon */
-
-#define INODE_BLOCK        0	/* inode block */
-#define DIRECTORY_BLOCK    1	/* directory block */
-#define INDIRECT_BLOCK     2	/* pointer block */
-#define MAP_BLOCK          3	/* bit map */
-#define FULL_DATA_BLOCK    5	/* data, fully used */
-#define PARTIAL_DATA_BLOCK 6	/* data, partly used*/
+#define b_data(b)   ((union fsdata_u *) b->data)->b__data
+#define b_dir(b)    ((union fsdata_u *) b->data)->b__dir
+#define b_v1_ind(b) ((union fsdata_u *) b->data)->b__v1_ind
+#define b_v2_ind(b) ((union fsdata_u *) b->data)->b__v2_ind
+#define b_v1_ino(b) ((union fsdata_u *) b->data)->b__v1_ino
+#define b_v2_ino(b) ((union fsdata_u *) b->data)->b__v2_ino
+#define b_bitmap(b) ((union fsdata_u *) b->data)->b__bitmap
 
 #endif
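
(Illustrative sketch, not part of the commit: the rewritten accessors cast the buffer's opaque data pointer to the fsdata union, so typed views of a cached block keep working; the helper is hypothetical and assumes mfs's on-disk inode type d2_inode.)

static d2_inode *dip2_sketch(struct buf *bp, int slot)
{
  return &b_v2_ino(bp)[slot];	/* V2 on-disk inode view of the block */
}
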
@@ -26,236 +26,6 @@
 #include "super.h"
 #include "inode.h"
 
-static void rm_lru(struct buf *bp);
-static void read_block(struct buf *);
-
-static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */
-
-static block_t super_start = 0, super_end = 0;
-
-/*===========================================================================*
- *				get_block				     *
- *===========================================================================*/
-struct buf *get_block(
-  register dev_t dev,		/* on which device is the block? */
-  register block_t block,	/* which block is wanted? */
-  int only_search		/* if NO_READ, don't read, else act normal */
-)
-{
-/* Check to see if the requested block is in the block cache.  If so, return
- * a pointer to it.  If not, evict some other block and fetch it (unless
- * 'only_search' is 1).  All the blocks in the cache that are not in use
- * are linked together in a chain, with 'front' pointing to the least recently
- * used block and 'rear' to the most recently used block.  If 'only_search' is
- * 1, the block being requested will be overwritten in its entirety, so it is
- * only necessary to see if it is in the cache; if it is not, any free buffer
- * will do.  It is not necessary to actually read the block in from disk.
- * If 'only_search' is PREFETCH, the block need not be read from the disk,
- * and the device is not to be marked on the block, so callers can tell if
- * the block returned is valid.
- * In addition to the LRU chain, there is also a hash chain to link together
- * blocks whose block numbers end with the same bit strings, for fast lookup.
- */
-
-  int b;
-  static struct buf *bp, *prev_ptr;
-  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);
-
-  assert(buf_hash);
-  assert(buf);
-  assert(nr_bufs > 0);
-
-  ASSERT(fs_block_size > 0);
-
-  /* Search the hash chain for (dev, block). Do_read() can use
-   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
-   * someone wants to read from a hole in a file, in which case this search
-   * is skipped
-   */
-  if (dev != NO_DEV) {
-	b = BUFHASH(block);
-	bp = buf_hash[b];
-	while (bp != NULL) {
-		if (bp->b_blocknr == block && bp->b_dev == dev) {
-			/* Block needed has been found. */
-			if (bp->b_count == 0) rm_lru(bp);
-			bp->b_count++;	/* record that block is in use */
-			ASSERT(bp->b_bytes == fs_block_size);
-			ASSERT(bp->b_dev == dev);
-			ASSERT(bp->b_dev != NO_DEV);
-			ASSERT(bp->bp);
-			return(bp);
-		} else {
-			/* This block is not the one sought. */
-			bp = bp->b_hash; /* move to next block on hash chain */
-		}
-	}
-  }
-
-  /* Desired block is not on available chain.  Take oldest block ('front'). */
-  if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
-
-  if(bp->b_bytes < fs_block_size) {
-	ASSERT(!bp->bp);
-	ASSERT(bp->b_bytes == 0);
-	if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
-		printf("MFS: couldn't allocate a new block.\n");
-		for(bp = front;
-			bp && bp->b_bytes < fs_block_size; bp = bp->b_next)
-			;
-		if(!bp) {
-			panic("no buffer available");
-		}
-	} else {
-		bp->b_bytes = fs_block_size;
-	}
-  }
-
-  ASSERT(bp);
-  ASSERT(bp->bp);
-  ASSERT(bp->b_bytes == fs_block_size);
-  ASSERT(bp->b_count == 0);
-
-  rm_lru(bp);
-
-  /* Remove the block that was just taken from its hash chain. */
-  b = BUFHASH(bp->b_blocknr);
-  prev_ptr = buf_hash[b];
-  if (prev_ptr == bp) {
-	buf_hash[b] = bp->b_hash;
-  } else {
-	/* The block just taken is not on the front of its hash chain. */
-	while (prev_ptr->b_hash != NULL)
-		if (prev_ptr->b_hash == bp) {
-			prev_ptr->b_hash = bp->b_hash;	/* found it */
-			break;
-		} else {
-			prev_ptr = prev_ptr->b_hash;	/* keep looking */
-		}
-  }
-
-  /* If the block taken is dirty, make it clean by writing it to the disk.
-   * Avoid hysteresis by flushing all other dirty blocks for the same device.
-   */
-  if (bp->b_dev != NO_DEV) {
-	if (ISDIRTY(bp)) flushall(bp->b_dev);
-
-	/* Are we throwing out a block that contained something?
-	 * Give it to VM for the second-layer cache.
-	 */
-	yieldid = make64(bp->b_dev, bp->b_blocknr);
-	assert(bp->b_bytes == fs_block_size);
-	bp->b_dev = NO_DEV;
-  }
-
-  /* Fill in block's parameters and add it to the hash chain where it goes. */
-  MARKCLEAN(bp);		/* NO_DEV blocks may be marked dirty */
-  bp->b_dev = dev;		/* fill in device number */
-  bp->b_blocknr = block;	/* fill in block number */
-  bp->b_count++;		/* record that block is being used */
-  b = BUFHASH(bp->b_blocknr);
-  bp->b_hash = buf_hash[b];
-
-  buf_hash[b] = bp;		/* add to hash list */
-
-  if(dev == NO_DEV) {
-	if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
-		vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
-			bp->bp, fs_block_size);
-	}
-	return(bp);	/* If the caller wanted a NO_DEV block, work is done. */
-  }
-
-  /* Go get the requested block unless searching or prefetching. */
-  if(only_search == PREFETCH || only_search == NORMAL) {
-	/* Block is not found in our cache, but we do want it
-	 * if it's in the vm cache.
-	 */
-	if(vmcache) {
-		/* If we can satisfy the PREFETCH or NORMAL request
-		 * from the vm cache, work is done.
-		 */
-		if(vm_yield_block_get_block(yieldid, getid,
-			bp->bp, fs_block_size) == OK) {
-			return bp;
-		}
-	}
-  }
-
-  if(only_search == PREFETCH) {
-	/* PREFETCH: don't do i/o. */
-	bp->b_dev = NO_DEV;
-  } else if (only_search == NORMAL) {
-	read_block(bp);
-  } else if(only_search == NO_READ) {
-	/* we want this block, but its contents
-	 * will be overwritten. VM has to forget
-	 * about it.
-	 */
-	if(vmcache) {
-		vm_forgetblock(getid);
-	}
-  } else
-	panic("unexpected only_search value: %d", only_search);
-
-  assert(bp->bp);
-
-  return(bp);	/* return the newly acquired block */
-}
-
-/*===========================================================================*
- *				put_block				     *
- *===========================================================================*/
-void put_block(bp, block_type)
-register struct buf *bp;	/* pointer to the buffer to be released */
-int block_type;			/* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
-{
-/* Return a block to the list of available blocks.   Depending on 'block_type'
- * it may be put on the front or rear of the LRU chain.  Blocks that are
- * expected to be needed again shortly (e.g., partially full data blocks)
- * go on the rear; blocks that are unlikely to be needed again shortly
- * (e.g., full data blocks) go on the front.  Blocks whose loss can hurt
- * the integrity of the file system (e.g., inode blocks) are written to
- * disk immediately if they are dirty.
- */
-  if (bp == NULL) return;	/* it is easier to check here than in caller */
-
-  bp->b_count--;		/* there is one use fewer now */
-  if (bp->b_count != 0) return;	/* block is still in use */
-
-  bufs_in_use--;		/* one fewer block buffers in use */
-
-  /* Put this block back on the LRU chain.  If the ONE_SHOT bit is set in
-   * 'block_type', the block is not likely to be needed again shortly, so put
-   * it on the front of the LRU chain where it will be the first one to be
-   * taken when a free buffer is needed later.
-   */
-  if (bp->b_dev == DEV_RAM || (block_type & ONE_SHOT)) {
-	/* Block probably won't be needed quickly. Put it on front of chain.
-	 * It will be the next block to be evicted from the cache.
-	 */
-	bp->b_prev = NULL;
-	bp->b_next = front;
-	if (front == NULL)
-		rear = bp;	/* LRU chain was empty */
-	else
-		front->b_prev = bp;
-	front = bp;
-  }
-  else {
-	/* Block probably will be needed quickly.  Put it on rear of chain.
-	 * It will not be evicted from the cache for a long time.
-	 */
-	bp->b_prev = rear;
-	bp->b_next = NULL;
-	if (rear == NULL)
-		front = bp;
-	else
-		rear->b_next = bp;
-	rear = bp;
-  }
-}
-
 /*===========================================================================*
  *				alloc_zone				     *
  *===========================================================================*/
@@ -320,361 +90,4 @@ void free_zone(
   if (bit < sp->s_zsearch) sp->s_zsearch = bit;
 }
 
-/*===========================================================================*
- *				read_block				     *
- *===========================================================================*/
-static void read_block(bp)
-register struct buf *bp;	/* buffer pointer */
-{
-/* Read or write a disk block. This is the only routine in which actual disk
- * I/O is invoked. If an error occurs, a message is printed here, but the error
- * is not reported to the caller.  If the error occurred while purging a block
- * from the cache, it is not clear what the caller could do about it anyway.
- */
-  int r, op_failed;
-  u64_t pos;
-  dev_t dev;
-
-  op_failed = 0;
-
-  if ( (dev = bp->b_dev) != NO_DEV) {
-	pos = mul64u(bp->b_blocknr, fs_block_size);
-	r = bdev_read(dev, pos, bp->b_data, fs_block_size,
-		BDEV_NOFLAGS);
-	if (r < 0) {
-		printf("MFS(%d) I/O error on device %d/%d, block %u\n",
-			SELF_E, major(dev), minor(dev), bp->b_blocknr);
-		op_failed = 1;
-	} else if (r != (ssize_t) fs_block_size) {
-		r = END_OF_FILE;
-		op_failed = 1;
-	}
-
-	if (op_failed) {
-		bp->b_dev = NO_DEV;	/* invalidate block */
-
-		/* Report read errors to interested parties. */
-		rdwt_err = r;
-	}
-  }
-}
-
-/*===========================================================================*
- *				invalidate				     *
- *===========================================================================*/
-void invalidate(
-  dev_t device			/* device whose blocks are to be purged */
-)
-{
-/* Remove all the blocks belonging to some device from the cache. */
-
-  register struct buf *bp;
-
-  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
-	if (bp->b_dev == device) bp->b_dev = NO_DEV;
-
-  vm_forgetblocks();
-}
-
-/*===========================================================================*
- *				block_write_ok				     *
- *===========================================================================*/
-int block_write_ok(struct buf *bp)
-{
-  if(superblock.s_dev != bp->b_dev) return 1;
-
-  if(bp->b_blocknr >= super_start && bp->b_blocknr <= super_end) {
-	printf("MFS: blocking write to superblock on mounted filesystem dev 0x%x.\n", bp->b_dev);
-	return 0;
-  }
-
-  if(superblock.s_rd_only) {
-	printf("MFS: blocking write to mounted readonly filesystem 0x%x.\n", bp->b_dev);
-	printf("This shouldn't happen.\n");
-	return 0;
-  }
-
-  return 1;
-}
-
-/*===========================================================================*
- *				flushall				     *
- *===========================================================================*/
-void flushall(
-  dev_t dev			/* device to flush */
-)
-{
-/* Flush all dirty blocks for one device. */
-
-  register struct buf *bp;
-  static struct buf **dirty;	/* static so it isn't on stack */
-  static unsigned int dirtylistsize = 0;
-  int ndirty;
-
-  if(dirtylistsize != nr_bufs) {
-	if(dirtylistsize > 0) {
-		assert(dirty != NULL);
-		free(dirty);
-	}
-	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
-		panic("couldn't allocate dirty buf list");
-	dirtylistsize = nr_bufs;
-  }
-
-  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
-	if (ISDIRTY(bp) && bp->b_dev == dev) {
-		if(!block_write_ok(bp)) {
-			printf("MFS: LATE: ignoring changes in block %d\n", bp->b_blocknr);
-			MARKCLEAN(bp);
-			continue;
-		}
-		dirty[ndirty++] = bp;
-	}
-  }
-  rw_scattered(dev, dirty, ndirty, WRITING);
-}
-
-/*===========================================================================*
- *				rw_scattered				     *
- *===========================================================================*/
-void rw_scattered(
-  dev_t dev,			/* major-minor device number */
-  struct buf **bufq,		/* pointer to array of buffers */
-  int bufqsize,			/* number of buffers */
-  int rw_flag			/* READING or WRITING */
-)
-{
-/* Read or write scattered data from a device. */
-
-  register struct buf *bp;
-  int gap;
-  register int i;
-  register iovec_t *iop;
-  static iovec_t *iovec = NULL;
-  u64_t pos;
-  int j, r;
-
-  STATICINIT(iovec, NR_IOREQS);
-
-  /* (Shell) sort buffers on b_blocknr. */
-  gap = 1;
-  do
-	gap = 3 * gap + 1;
-  while (gap <= bufqsize);
-  while (gap != 1) {
-	gap /= 3;
-	for (j = gap; j < bufqsize; j++) {
-		for (i = j - gap;
-		     i >= 0 && bufq[i]->b_blocknr > bufq[i + gap]->b_blocknr;
-		     i -= gap) {
-			bp = bufq[i];
-			bufq[i] = bufq[i + gap];
-			bufq[i + gap] = bp;
-		}
-	}
-  }
-
-  /* Set up I/O vector and do I/O.  The result of bdev I/O is OK if everything
-   * went fine, otherwise the error code for the first failed transfer.
-   */
-  while (bufqsize > 0) {
-	for (j = 0, iop = iovec; j < NR_IOREQS && j < bufqsize; j++, iop++) {
-		bp = bufq[j];
-		if (bp->b_blocknr != (block_t) bufq[0]->b_blocknr + j) break;
-		iop->iov_addr = (vir_bytes) bp->b_data;
-		iop->iov_size = (vir_bytes) fs_block_size;
-	}
-	pos = mul64u(bufq[0]->b_blocknr, fs_block_size);
-	if (rw_flag == READING)
-		r = bdev_gather(dev, pos, iovec, j, BDEV_NOFLAGS);
-	else
-		r = bdev_scatter(dev, pos, iovec, j, BDEV_NOFLAGS);
-
-	/* Harvest the results.  The driver may have returned an error, or it
-	 * may have done less than what we asked for.
-	 */
-	if (r < 0) {
-		printf("MFS: I/O error %d on device %d/%d, block %u\n",
-			r, major(dev), minor(dev), bufq[0]->b_blocknr);
-	}
-	for (i = 0; i < j; i++) {
-		bp = bufq[i];
-		if (r < (ssize_t) fs_block_size) {
-			/* Transfer failed. */
-			if (i == 0) {
-				bp->b_dev = NO_DEV;	/* Invalidate block */
-				vm_forgetblocks();
-			}
-			break;
-		}
-		if (rw_flag == READING) {
-			bp->b_dev = dev;	/* validate block */
-			put_block(bp, PARTIAL_DATA_BLOCK);
-		} else {
-			MARKCLEAN(bp);
-		}
-		r -= fs_block_size;
-	}
-	bufq += i;
-	bufqsize -= i;
-	if (rw_flag == READING) {
-		/* Don't bother reading more than the device is willing to
-		 * give at this time.  Don't forget to release those extras.
-		 */
-		while (bufqsize > 0) {
-			put_block(*bufq++, PARTIAL_DATA_BLOCK);
-			bufqsize--;
-		}
-	}
-	if (rw_flag == WRITING && i == 0) {
-		/* We're not making progress, this means we might keep
-		 * looping. Buffers remain dirty if un-written. Buffers are
-		 * lost if invalidate()d or LRU-removed while dirty. This
-		 * is better than keeping unwritable blocks around forever..
-		 */
-		break;
-	}
-  }
-}
-
-/*===========================================================================*
- *				rm_lru					     *
- *===========================================================================*/
-static void rm_lru(bp)
-struct buf *bp;
-{
-/* Remove a block from its LRU chain. */
-  struct buf *next_ptr, *prev_ptr;
-
-  bufs_in_use++;
-  next_ptr = bp->b_next;	/* successor on LRU chain */
-  prev_ptr = bp->b_prev;	/* predecessor on LRU chain */
-  if (prev_ptr != NULL)
-	prev_ptr->b_next = next_ptr;
-  else
-	front = next_ptr;	/* this block was at front of chain */
-
-  if (next_ptr != NULL)
-	next_ptr->b_prev = prev_ptr;
-  else
-	rear = prev_ptr;	/* this block was at rear of chain */
-}
-
-/*===========================================================================*
- *				cache_resize				     *
- *===========================================================================*/
-static void cache_resize(unsigned int blocksize, unsigned int bufs)
-{
-  struct buf *bp;
-  struct inode *rip;
-
-#define MINBUFS 10
-  assert(blocksize > 0);
-  assert(bufs >= MINBUFS);
-
-  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
-	if(bp->b_count != 0) panic("change blocksize with buffer in use");
-
-  for (rip = &inode[0]; rip < &inode[NR_INODES]; rip++)
-	if (rip->i_count > 0) panic("change blocksize with inode in use");
-
-  buf_pool(bufs);
-
-  fs_block_size = blocksize;
-  super_start = SUPER_BLOCK_BYTES / fs_block_size;
-  super_end = (SUPER_BLOCK_BYTES + _MIN_BLOCK_SIZE - 1) / fs_block_size;
-}
-
-/*===========================================================================*
- *				bufs_heuristic				     *
- *===========================================================================*/
-static int bufs_heuristic(struct super_block *sp)
-{
-  u32_t btotal, bfree, bused;
-
-  blockstats(&btotal, &bfree, &bused);
-
-  return fs_bufs_heuristic(MINBUFS, btotal, bfree,
-	sp->s_block_size, major(sp->s_dev));
-}
-
-/*===========================================================================*
- *				set_blocksize				     *
- *===========================================================================*/
-void set_blocksize(struct super_block *sp)
-{
-  int bufs;
-
-  cache_resize(sp->s_block_size, MINBUFS);
-  bufs = bufs_heuristic(sp);
-  cache_resize(sp->s_block_size, bufs);
-
-  /* Decide whether to use seconday cache or not.
-   * Only do this if
-   *	- it's available, and
-   *	- use of it hasn't been disabled for this fs, and
-   *	- our main FS device isn't a memory device
-   */
-
-  vmcache = 0;
-  if(vm_forgetblock(VM_BLOCKID_NONE) != ENOSYS &&
-	may_use_vmcache && major(sp->s_dev) != MEMORY_MAJOR) {
-	vmcache = 1;
-  }
-}
-
-/*===========================================================================*
- *				buf_pool				     *
- *===========================================================================*/
-void buf_pool(int new_nr_bufs)
-{
-/* Initialize the buffer pool. */
-  register struct buf *bp;
-
-  assert(new_nr_bufs >= MINBUFS);
-
-  if(nr_bufs > 0) {
-	assert(buf);
-	(void) fs_sync();
-	for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
-		if(bp->bp) {
-			assert(bp->b_bytes > 0);
-			free_contig(bp->bp, bp->b_bytes);
-		}
-	}
-  }
-
-  if(buf)
-	free(buf);
-
-  if(!(buf = calloc(sizeof(buf[0]), new_nr_bufs)))
-	panic("couldn't allocate buf list (%d)", new_nr_bufs);
-
-  if(buf_hash)
-	free(buf_hash);
-  if(!(buf_hash = calloc(sizeof(buf_hash[0]), new_nr_bufs)))
-	panic("couldn't allocate buf hash list (%d)", new_nr_bufs);
-
-  nr_bufs = new_nr_bufs;
-
-  bufs_in_use = 0;
-  front = &buf[0];
-  rear = &buf[nr_bufs - 1];
-
-  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
-	bp->b_blocknr = NO_BLOCK;
-	bp->b_dev = NO_DEV;
-	bp->b_next = bp + 1;
-	bp->b_prev = bp - 1;
-	bp->bp = NULL;
-	bp->b_bytes = 0;
-  }
-  front->b_prev = NULL;
-  rear->b_next = NULL;
-
-  for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) bp->b_hash = bp->b_next;
-  buf_hash[0] = front;
-
-  vm_forgetblocks();
-}
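
(Illustrative sketch, not part of the commit: everything deleted above now sits behind libminixfs; a client's complete read-modify-write cycle reduces to the following. The helper is hypothetical; MAP_BLOCK is an arbitrary example type.)

#include <minix/libminixfs.h>

static void modify_block_sketch(dev_t dev, block_t b)
{
  struct buf *bp;

  bp = lmfs_get_block(dev, b, NORMAL);	/* hash lookup, LRU, VM cache, I/O */
  ((char *) bp->data)[0] = 0;		/* only 'data' is public now */
  lmfs_markdirty(bp);
  lmfs_put_block(bp, MAP_BLOCK);	/* back onto the LRU chain */
}
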
@@ -2,10 +2,10 @@
 #ifndef _MFS_CLEAN_H
 #define _MFS_CLEAN_H 1
 
-#define MARKDIRTY(b) do { if(superblock.s_dev == (b)->b_dev && superblock.s_rd_only) { printf("%s:%d: dirty block on rofs! ", __FILE__, __LINE__); util_stacktrace(); } else { (b)->b_dirt = BP_DIRTY; } } while(0)
-#define MARKCLEAN(b) ((b)->b_dirt = BP_CLEAN)
+#define MARKDIRTY(b) do { if(superblock.s_dev == lmfs_dev(b) && superblock.s_rd_only) { printf("%s:%d: dirty block on rofs! ", __FILE__, __LINE__); util_stacktrace(); } else { lmfs_markdirty(b); } } while(0)
+#define MARKCLEAN(b) lmfs_markclean(b)
 
-#define ISDIRTY(b)	((b)->b_dirt == BP_DIRTY)
-#define ISCLEAN(b)	((b)->b_dirt == BP_CLEAN)
+#define ISDIRTY(b)	(!lmfs_isclean(b))
+#define ISCLEAN(b)	(lmfs_isclean(b))
 
 #endif
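
(Illustrative sketch, not part of the commit: the rewritten MARKDIRTY keeps mfs's local read-only guard while delegating the actual state change, so write protection at call sites behaves as before.)

static void mark_sketch(struct buf *bp)
{
  MARKDIRTY(bp);	/* on a mounted read-only fs: prints a warning plus
			 * stack trace, and the block stays clean in lmfs */
  if (ISDIRTY(bp)) {
	/* block will be written out by the next lmfs_flushall() */
  }
}
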
@@ -39,9 +39,6 @@
 
 /* Miscellaneous constants */
 #define SU_UID		((uid_t) 0)	/* super_user's uid_t */
-#define NORMAL		0	/* forces get_block to do disk read */
-#define NO_READ		1	/* prevents get_block from doing disk read */
-#define PREFETCH	2	/* tells get_block not to read or mark dev */
 
 #define NO_BIT		((bit_t) 0)	/* returned by alloc_bit() to signal failure */
 
@@ -56,8 +53,6 @@
 #define IGN_PERM	0
 #define CHK_PERM	1
 
-#define BP_CLEAN	0	/* on-disk block and memory copies identical */
-#define BP_DIRTY	1	/* on-disk block and memory copies differ */
 #define IN_CLEAN	0	/* in-block inode and memory copies identical */
 #define IN_DIRTY	1	/* in-block inode and memory copies differ */
 #define ATIME		002	/* set if atime field needs updating */

@@ -11,7 +11,6 @@
 
 /* The following variables are used for returning results to the caller. */
 EXTERN int err_code;		/* temporary storage for error number */
-EXTERN int rdwt_err;		/* status of last disk i/o request */
 
 EXTERN int cch[NR_INODES];
 
@@ -41,13 +40,4 @@ EXTERN char fs_dev_label[16];	/* Name of the device driver that is handled
 EXTERN int unmountdone;
 EXTERN int exitsignaled;
 
-/* our block size. */
-EXTERN unsigned int fs_block_size;
-
-/* Buffer cache. */
-EXTERN struct buf *buf;
-EXTERN struct buf **buf_hash;	/* the buffer hash table */
-EXTERN unsigned int nr_bufs;
-EXTERN int may_use_vmcache;
-
 #endif

@@ -398,8 +398,8 @@ int rw_flag;			/* READING or WRITING */
   offset = START_BLOCK + sp->s_imap_blocks + sp->s_zmap_blocks;
   b = (block_t) (rip->i_num - 1)/sp->s_inodes_per_block + offset;
   bp = get_block(rip->i_dev, b, NORMAL);
-  dip  = bp->b_v1_ino + (rip->i_num - 1) % V1_INODES_PER_BLOCK;
-  dip2 = bp->b_v2_ino + (rip->i_num - 1) %
+  dip  = b_v1_ino(bp) + (rip->i_num - 1) % V1_INODES_PER_BLOCK;
+  dip2 = b_v2_ino(bp) + (rip->i_num - 1) %
	 V2_INODES_PER_BLOCK(sp->s_block_size);
 
   /* Do the read or write. */

@@ -195,7 +195,7 @@ int fs_rdlink()
  copylen = min( copylen, (unsigned) rip->i_size);
  bp = get_block(rip->i_dev, b, NORMAL);
  r = sys_safecopyto(VFS_PROC_NR, (cp_grant_id_t) fs_m_in.REQ_GRANT,
	(vir_bytes) 0, (vir_bytes) bp->b_data,
	(vir_bytes) 0, (vir_bytes) b_data(bp),
	(size_t) copylen);
  put_block(bp, DIRECTORY_BLOCK);
  if (r == OK)

@@ -694,7 +694,7 @@ off_t len;
	bytes = block_size - offset;
	if (bytes > (size_t) len)
		bytes = len;
	memset(bp->b_data + offset, 0, bytes);
	memset(b_data(bp) + offset, 0, bytes);
	MARKDIRTY(bp);
	put_block(bp, FULL_DATA_BLOCK);
@@ -107,7 +107,7 @@ static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
/* Initialize the Minix file server. */
  int i;

  may_use_vmcache = 1;
  lmfs_may_use_vmcache(1);

  /* Init inode table */
  for (i = 0; i < NR_INODES; ++i) {

@@ -118,8 +118,7 @@ static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *UNUSED(info))
  init_inode_cache();

  SELF_E = getprocnr();
  buf_pool(DEFAULT_NR_BUFS);
  fs_block_size = _MIN_BLOCK_SIZE;
  lmfs_buf_pool(DEFAULT_NR_BUFS);

  return(OK);
}
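With these two hunks, a client filesystem's startup touches the cache only through the library, and the block size is no longer fixed at init time; it is set per mount (see the fs_readsuper() hunk below). The cache-related calls a client now makes, in order, look like this sketch assembled from this commit's hunks:

    lmfs_may_use_vmcache(1);        /* init: opt in to VM's 2ndary cache */
    lmfs_buf_pool(DEFAULT_NR_BUFS); /* init: allocate the primary cache */
    /* ... later, at mount time: */
    lmfs_set_blocksize(superblock.s_block_size, major(fs_dev));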
@@ -16,19 +16,15 @@ int fs_sync()
 * the block cache.
 */
  struct inode *rip;
  struct buf *bp;

  assert(nr_bufs > 0);
  assert(buf);
  assert(lmfs_nr_bufs() > 0);

  /* Write all the dirty inodes to the disk. */
  for(rip = &inode[0]; rip < &inode[NR_INODES]; rip++)
	if(rip->i_count > 0 && IN_ISDIRTY(rip)) rw_inode(rip, WRITING);

  /* Write all the dirty blocks to the disk, one drive at a time. */
  for(bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
	if(bp->b_dev != NO_DEV && ISDIRTY(bp))
		flushall(bp->b_dev);
  /* Write all the dirty blocks to the disk. */
  lmfs_flushall();

  return(OK);		/* sync() can't fail */
}

@@ -45,8 +41,8 @@ int fs_flush()
  dev_t dev = (dev_t) fs_m_in.REQ_DEV;
  if(dev == fs_dev) return(EBUSY);

  flushall(dev);
  invalidate(dev);
  lmfs_flushall();
  lmfs_invalidate(dev);

  return(OK);
}
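Note the small semantic change in fs_flush(): it used to flush only the requested device via flushall(dev), while lmfs_flushall() writes back every dirty buffer on every device. Inside the library, lmfs_flushall() plausibly walks the whole pool along these lines (a sketch, not the actual implementation; flushdev is a hypothetical internal helper):

    /* Sketch of lmfs_flushall(): flush each device that has dirty blocks. */
    void lmfs_flushall(void)
    {
            struct buf *bp;

            for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++)
                    if (lmfs_dev(bp) != NO_DEV && !lmfs_isclean(bp))
                            flushdev(lmfs_dev(bp)); /* hypothetical helper */
    }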
@@ -78,7 +78,7 @@ int fs_readsuper()
	printf("MFS: WARNING: FS 0x%x unclean, mounting readonly\n", fs_dev);
  }

  set_blocksize(&superblock);
  lmfs_set_blocksize(superblock.s_block_size, major(fs_dev));

  /* Get the root inode of the mounted file system. */
  if( (root_ip = get_inode(fs_dev, ROOT_INODE)) == NULL) {
@@ -205,12 +205,12 @@ int fs_slink()
  else
	r = sys_safecopyfrom(VFS_PROC_NR,
		(cp_grant_id_t) fs_m_in.REQ_GRANT3,
		(vir_bytes) 0, (vir_bytes) bp->b_data,
		(vir_bytes) 0, (vir_bytes) b_data(bp),
		(size_t) fs_m_in.REQ_MEM_SIZE);

  if(bp != NULL && r == OK) {
	bp->b_data[_MIN_BLOCK_SIZE-1] = '\0';
	sip->i_size = (off_t) strlen(bp->b_data);
	b_data(bp)[_MIN_BLOCK_SIZE-1] = '\0';
	sip->i_size = (off_t) strlen(b_data(bp));
	if(sip->i_size != fs_m_in.REQ_MEM_SIZE) {
		/* This can happen if the user provides a buffer
		 * with a \0 in it. This can cause a lot of trouble
@@ -298,7 +298,7 @@ char *suffix; /* current remaining path. Has to point in the

  bp = get_block(rip->i_dev, blink, NORMAL);
  llen = (size_t) rip->i_size;
  sp = bp->b_data;
  sp = b_data(bp);
  slen = strlen(suffix);

  /* The path we're parsing looks like this:

@@ -532,8 +532,8 @@ int check_permissions; /* check permissions when flag is !IS_EMPTY */
  assert(bp != NULL);

  /* Search a directory block. */
  for (dp = &bp->b_dir[0];
	dp < &bp->b_dir[NR_DIR_ENTRIES(ldir_ptr->i_sp->s_block_size)];
  for (dp = &b_dir(bp)[0];
	dp < &b_dir(bp)[NR_DIR_ENTRIES(ldir_ptr->i_sp->s_block_size)];
	dp++) {
	if (++new_slots > old_slots) {	/* not found, but room left */
		if (flag == ENTER) e_hit = TRUE;

@@ -607,7 +607,7 @@ int check_permissions; /* check permissions when flag is !IS_EMPTY */
	if (new_slots == 0) return(EFBIG); /* dir size limited by slot count */
	if ( (bp = new_block(ldir_ptr, ldir_ptr->i_size)) == NULL)
		return(err_code);
	dp = &bp->b_dir[0];
	dp = &b_dir(bp)[0];
	extended = 1;
  }
@@ -1,6 +1,10 @@
#ifndef __MFS_PROTO_H__
#define __MFS_PROTO_H__

/* Some shortcuts to functions in -lminixfs */
#define get_block(d, b, t) lmfs_get_block(d, b, t)
#define put_block(b, t) lmfs_put_block(b, t)

/* Function prototypes. */

/* Structs used in prototypes must be declared as such first. */

@@ -12,16 +16,7 @@ struct super_block;

/* cache.c */
zone_t alloc_zone(dev_t dev, zone_t z);
void buf_pool(int bufs);
void flushall(dev_t dev);
void free_zone(dev_t dev, zone_t numb);
struct buf *get_block(dev_t dev, block_t block, int only_search);
void invalidate(dev_t device);
void put_block(struct buf *bp, int block_type);
void set_blocksize(struct super_block *);
void rw_scattered(dev_t dev, struct buf **bufq, int bufqsize, int
	rw_flag);
int block_write_ok(struct buf *bp);

/* inode.c */
struct inode *alloc_inode(dev_t dev, mode_t bits);

@@ -96,7 +91,6 @@ int write_super(struct super_block *sp);

/* stats.c */
bit_t count_free_bits(struct super_block *sp, int map);
void blockstats(u32_t *total, u32_t *free, u32_t *avail);

/* time.c */
int fs_utime(void);
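With the get_block()/put_block() shortcuts above, existing mfs call sites compile unchanged while resolving to the library. A typical client sequence after this commit, assembled from the hunks elsewhere in this diff:

    struct buf *bp;

    bp = get_block(dev, block, NORMAL);     /* lmfs_get_block(): read if needed */
    memset(b_data(bp), 0, lmfs_bytes(bp));  /* touch only the opaque data */
    MARKDIRTY(bp);                          /* lmfs_markdirty() underneath */
    put_block(bp, FULL_DATA_BLOCK);         /* lmfs_put_block(): drop the ref */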
@@ -58,7 +58,7 @@ int fs_readwrite(void)
  position = (off_t) fs_m_in.REQ_SEEK_POS_LO;
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;

  rdwt_err = OK;		/* set to EIO if disk error occurs */
  lmfs_reset_rdwt_err();

  /* If this is file i/o, check we can write */
  if (rw_flag == WRITING && !block_spec) {

@@ -98,7 +98,7 @@ int fs_readwrite(void)
		nrbytes, rw_flag, gid, cum_io, block_size, &completed);

	if (r != OK) break;	/* EOF reached */
	if (rdwt_err < 0) break;
	if (lmfs_rdwt_err() < 0) break;

	/* Update counters and pointers. */
	nrbytes -= chunk;	/* bytes yet to be read */

@@ -118,8 +118,8 @@ int fs_readwrite(void)

  rip->i_seek = NO_SEEK;

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;
  if (lmfs_rdwt_err() != OK) r = lmfs_rdwt_err();	/* check for disk error */
  if (lmfs_rdwt_err() == END_OF_FILE) r = OK;

  /* even on a ROFS, writing to a device node on it is fine,
   * just don't update the inode stats for it. And ditto for reading.

@@ -172,7 +172,7 @@ int fs_breadwrite(void)
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  rdwt_err = OK;		/* set to EIO if disk error occurs */
  lmfs_reset_rdwt_err();

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */

@@ -185,7 +185,7 @@ int fs_breadwrite(void)
		cum_io, block_size, &completed);

	if (r != OK) break;	/* EOF reached */
	if (rdwt_err < 0) break;
	if (lmfs_rdwt_err() < 0) break;

	/* Update counters and pointers. */
	nrbytes -= chunk;	/* bytes yet to be read */

@@ -196,8 +196,8 @@ int fs_breadwrite(void)
  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;
  if (lmfs_rdwt_err() != OK) r = lmfs_rdwt_err();	/* check for disk error */
  if (lmfs_rdwt_err() == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;

@@ -279,15 +279,11 @@ int *completed; /* number of bytes copied */
  if (rw_flag == READING) {
	/* Copy a chunk from the block buffer to user space. */
	r = sys_safecopyto(VFS_PROC_NR, gid, (vir_bytes) buf_off,
		(vir_bytes) (bp->b_data+off), (size_t) chunk);
  } else if(!block_write_ok(bp)) {
	/* Let cache layer veto writing to this block */
	printf("MFS: block write not allowed\n");
	r = EPERM;
		(vir_bytes) (b_data(bp)+off), (size_t) chunk);
  } else {
	/* Copy a chunk from user space to the block buffer. */
	r = sys_safecopyfrom(VFS_PROC_NR, gid, (vir_bytes) buf_off,
		(vir_bytes) (bp->b_data+off), (size_t) chunk);
		(vir_bytes) (b_data(bp)+off), (size_t) chunk);
	MARKDIRTY(bp);
  }

@@ -348,8 +344,8 @@ off_t position; /* position in file whose blk wanted */
	if ((unsigned int) index > rip->i_nindirs)
		return(NO_BLOCK);	/* Can't go beyond double indirects */
	bp = get_block(rip->i_dev, b, NORMAL);	/* get double indirect block */
	ASSERT(bp->b_dev != NO_DEV);
	ASSERT(bp->b_dev == rip->i_dev);
	ASSERT(lmfs_dev(bp) != NO_DEV);
	ASSERT(lmfs_dev(bp) == rip->i_dev);
	z = rd_indir(bp, index);	/* z= zone for single*/
	put_block(bp, INDIRECT_BLOCK);	/* release double ind block */
	excess = excess % nr_indirects;	/* index into single ind blk */

@@ -385,13 +381,13 @@ int index; /* index into *bp */
  if(bp == NULL)
	panic("rd_indir() on NULL");

  sp = get_super(bp->b_dev);	/* need super block to find file sys type */
  sp = get_super(lmfs_dev(bp));	/* need super block to find file sys type */

  /* read a zone from an indirect block */
  if (sp->s_version == V1)
	zone = (zone_t) conv2(sp->s_native, (int) bp->b_v1_ind[index]);
	zone = (zone_t) conv2(sp->s_native, (int) b_v1_ind(bp)[index]);
  else
	zone = (zone_t) conv4(sp->s_native, (long) bp->b_v2_ind[index]);
	zone = (zone_t) conv4(sp->s_native, (long) b_v2_ind(bp)[index]);

  if (zone != NO_ZONE &&
	(zone < (zone_t) sp->s_firstdatazone || zone >= sp->s_zones)) {

@@ -420,6 +416,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
   * flag on all reads to allow this.
   */
  /* Minimum number of blocks to prefetch. */
  int nr_bufs = lmfs_nr_bufs();
# define BLOCKS_MINIMUM		(nr_bufs < 50 ? 18 : 32)
  int block_spec, scale, read_q_size;
  unsigned int blocks_ahead, fragment, block_size;

@@ -451,7 +448,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
  block = baseblock;
  bp = get_block(dev, block, PREFETCH);
  assert(bp != NULL);
  if (bp->b_dev != NO_DEV) return(bp);
  if (lmfs_dev(bp) != NO_DEV) return(bp);

  /* The best guess for the number of blocks to prefetch: A lot.
   * It is impossible to tell what the device looks like, so we don't even

@@ -516,18 +513,18 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
	if (--blocks_ahead == 0) break;

	/* Don't trash the cache, leave 4 free. */
	if (bufs_in_use >= nr_bufs - 4) break;
	if (lmfs_bufs_in_use() >= nr_bufs - 4) break;

	block++;

	bp = get_block(dev, block, PREFETCH);
	if (bp->b_dev != NO_DEV) {
	if (lmfs_dev(bp) != NO_DEV) {
		/* Oops, block already in the cache, get out. */
		put_block(bp, FULL_DATA_BLOCK);
		break;
	}
  }
  rw_scattered(dev, read_q, read_q_size, READING);
  lmfs_rw_scattered(dev, read_q, read_q_size, READING);
  return(get_block(dev, baseblock, NORMAL));
}

@@ -588,10 +585,10 @@ int fs_getdents(void)

	/* Search a directory block. */
	if (block_pos < pos)
		dp = &bp->b_dir[off / DIR_ENTRY_SIZE];
		dp = &b_dir(bp)[off / DIR_ENTRY_SIZE];
	else
		dp = &bp->b_dir[0];
	for (; dp < &bp->b_dir[NR_DIR_ENTRIES(block_size)]; dp++) {
		dp = &b_dir(bp)[0];
	for (; dp < &b_dir(bp)[NR_DIR_ENTRIES(block_size)]; dp++) {
		if (dp->mfs_d_ino == 0)
			continue;	/* Entry is not in use */

@@ -609,7 +606,7 @@ int fs_getdents(void)
		reclen += sizeof(long) - o;

		/* Need the position of this entry in the directory */
		ent_pos = block_pos + ((char *) dp - (bp->b_data));
		ent_pos = block_pos + ((char *) dp - (char *) bp->data);

		if (userbuf_off + tmpbuf_off + reclen >= size) {
			/* The user has no space for one more record */
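The rdwt_err global moves behind a reset/read pair. Inside libminixfs this is presumably no more than a private variable and two accessors; a minimal sketch:

    /* Sketch: library-private I/O status behind the two calls used above. */
    static int rdwt_err = OK;       /* set to EIO if a disk error occurs */

    void lmfs_reset_rdwt_err(void) { rdwt_err = OK; }
    int lmfs_rdwt_err(void) { return rdwt_err; }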
@@ -76,7 +76,7 @@ static int stat_inode(
  statbuf.st_atime = rip->i_atime;
  statbuf.st_mtime = rip->i_mtime;
  statbuf.st_ctime = rip->i_ctime;
  statbuf.st_blksize = fs_block_size;
  statbuf.st_blksize = lmfs_fs_block_size();
  statbuf.st_blocks = estimate_blocks(rip);

  /* Copy the struct to user space. */

@@ -122,7 +122,7 @@ int fs_statvfs()

  scale = sp->s_log_zone_size;

  blockstats((u32_t *) &st.f_blocks, (u32_t *) &st.f_bfree, &used);
  fs_blockstats((u32_t *) &st.f_blocks, (u32_t *) &st.f_bfree, &used);
  st.f_bavail = st.f_bfree;

  st.f_bsize = sp->s_block_size << scale;
@@ -54,10 +54,10 @@ int map; /* IMAP (inode map) or ZMAP (zone map) */
  do {
	bp = get_block(sp->s_dev, start_block + block, NORMAL);
	assert(bp);
	wlim = &bp->b_bitmap[FS_BITMAP_CHUNKS(sp->s_block_size)];
	wlim = &b_bitmap(bp)[FS_BITMAP_CHUNKS(sp->s_block_size)];

	/* Iterate over the words in block. */
	for (wptr = &bp->b_bitmap[word]; wptr < wlim; wptr++) {
	for (wptr = &b_bitmap(bp)[word]; wptr < wlim; wptr++) {

		/* Does this word contain a free bit? */
		if (*wptr == (bitchunk_t) ~0) continue;

@@ -67,7 +67,7 @@ int map; /* IMAP (inode map) or ZMAP (zone map) */
	for (i = 0; i < 8*sizeof(k); ++i) {
		/* Bit number from the start of the bit map. */
		b = ((bit_t) block * FS_BITS_PER_BLOCK(sp->s_block_size))
			+ (wptr - &bp->b_bitmap[0]) * FS_BITCHUNK_BITS
			+ (wptr - &b_bitmap(bp)[0]) * FS_BITCHUNK_BITS
			+ i;

		/* Don't count bits beyond the end of the map. */

@@ -92,7 +92,7 @@ int map; /* IMAP (inode map) or ZMAP (zone map) */
/*===========================================================================*
 *				blockstats				     *
 *===========================================================================*/
void blockstats(u32_t *blocks, u32_t *free, u32_t *used)
void fs_blockstats(u32_t *blocks, u32_t *free, u32_t *used)
{
  struct super_block *sp;
  int scale;
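Renaming blockstats() to fs_blockstats() turns it into the callback libminixfs invokes to learn filesystem usage, for example when sizing the buffer pool. A hedged sketch of the consuming side (the minbufs value and the exact wiring are assumptions based on the exported interface):

    u32_t btotal = 0, bfree = 0, bused = 0;

    fs_blockstats(&btotal, &bfree, &bused);         /* ask the client FS */
    lmfs_buf_pool(fs_bufs_heuristic(10 /* minbufs, hypothetical */, btotal,
            bfree, lmfs_fs_block_size(), major(fs_dev)));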
@@ -66,10 +66,10 @@ bit_t origin; /* number of bit to start searching at */
  bcount = bit_blocks + 1;
  do {
	bp = get_block(sp->s_dev, start_block + block, NORMAL);
	wlim = &bp->b_bitmap[FS_BITMAP_CHUNKS(sp->s_block_size)];
	wlim = &b_bitmap(bp)[FS_BITMAP_CHUNKS(sp->s_block_size)];

	/* Iterate over the words in block. */
	for (wptr = &bp->b_bitmap[word]; wptr < wlim; wptr++) {
	for (wptr = &b_bitmap(bp)[word]; wptr < wlim; wptr++) {

		/* Does this word contain a free bit? */
		if (*wptr == (bitchunk_t) ~0) continue;

@@ -80,7 +80,7 @@ bit_t origin; /* number of bit to start searching at */

		/* Bit number from the start of the bit map. */
		b = ((bit_t) block * FS_BITS_PER_BLOCK(sp->s_block_size))
			+ (wptr - &bp->b_bitmap[0]) * FS_BITCHUNK_BITS
			+ (wptr - &b_bitmap(bp)[0]) * FS_BITCHUNK_BITS
			+ i;

		/* Don't allocate bits beyond the end of the map. */

@@ -133,14 +133,14 @@ bit_t bit_returned; /* number of bit to insert into the map */

  bp = get_block(sp->s_dev, start_block + block, NORMAL);

  k = (bitchunk_t) conv4(sp->s_native, (int) bp->b_bitmap[word]);
  k = (bitchunk_t) conv4(sp->s_native, (int) b_bitmap(bp)[word]);
  if (!(k & mask)) {
	if (map == IMAP) panic("tried to free unused inode");
	else panic("tried to free unused block: %u", bit_returned);
  }

  k &= ~mask;
  bp->b_bitmap[word] = (bitchunk_t) conv4(sp->s_native, (int) k);
  b_bitmap(bp)[word] = (bitchunk_t) conv4(sp->s_native, (int) k);
  MARKDIRTY(bp);

  put_block(bp, MAP_BLOCK);

@@ -172,8 +172,7 @@ unsigned int get_block_size(dev_t dev)
  if (dev == NO_DEV)
	panic("request for block size of NO_DEV");

  return(fs_block_size);
  return(lmfs_fs_block_size());
}
@@ -1,6 +1,8 @@
#ifndef __MFS_TYPE_H__
#define __MFS_TYPE_H__

#include <minix/libminixfs.h>

/* Declaration of the V1 inode as it is on the disk (not in core). */
typedef struct {		/* V1.x disk inode */
  u16_t d1_mode;		/* file type, protection, etc. */

@@ -25,20 +27,5 @@ typedef struct { /* V2.x disk inode */
  zone_t d2_zone[V2_NR_TZONES];	/* block nums for direct, ind, and dbl ind */
} d2_inode;

struct buf {
  /* Data portion of the buffer. */
  union fsdata_u *bp;

  /* Header portion of the buffer. */
  struct buf *b_next;		/* used to link all free bufs in a chain */
  struct buf *b_prev;		/* used to link all free bufs the other way */
  struct buf *b_hash;		/* used to link bufs on hash chains */
  block_t b_blocknr;		/* block number of its (minor) device */
  dev_t b_dev;			/* major | minor device where block resides */
  char b_dirt;			/* BP_CLEAN or BP_DIRTY */
  char b_count;			/* number of users of this buffer */
  unsigned int b_bytes;		/* Number of bytes allocated in bp */
};

#endif
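With struct buf reduced to the library's opaque form, the b_data()/b_dir()/b_bitmap()-style accessors used throughout this commit presumably just cast the one public 'void *data' field to the view the caller needs. A plausible set of definitions (a sketch; the actual macros live in mfs's headers and may go through the old fsdata union instead):

    /* Sketch: typed views of the opaque block contents; an assumption. */
    #define b_data(bp)   ((char *) (bp)->data)
    #define b_dir(bp)    ((struct direct *) (bp)->data)     /* dir entries */
    #define b_bitmap(bp) ((bitchunk_t *) (bp)->data)        /* bitmap blocks */
    #define b_v1_ind(bp) ((zone1_t *) (bp)->data)           /* V1 indirects */
    #define b_v2_ind(bp) ((zone_t *) (bp)->data)            /* V2 indirects */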
@@ -103,9 +103,9 @@ void sanitycheck(char *file, int line)
  MYASSERT(SELF_E > 0);
  if(superblock.s_dev != NO_DEV) {
	MYASSERT(superblock.s_dev == fs_dev);
	MYASSERT(superblock.s_block_size == fs_block_size);
	MYASSERT(superblock.s_block_size == lmfs_fs_block_size());
  } else {
	MYASSERT(_MIN_BLOCK_SIZE == fs_block_size);
	MYASSERT(_MIN_BLOCK_SIZE == lmfs_fs_block_size());
  }
}
@@ -198,13 +198,13 @@ zone_t zone; /* zone to write */
  if(bp == NULL)
	panic("wr_indir() on NULL");

  sp = get_super(bp->b_dev);	/* need super block to find file sys type */
  sp = get_super(lmfs_dev(bp));	/* need super block to find file sys type */

  /* write a zone into an indirect block */
  if (sp->s_version == V1)
	bp->b_v1_ind[index] = (zone1_t) conv2(sp->s_native, (int) zone);
	b_v1_ind(bp)[index] = (zone1_t) conv2(sp->s_native, (int) zone);
  else
	bp->b_v2_ind[index] = (zone_t) conv4(sp->s_native, (long) zone);
	b_v2_ind(bp)[index] = (zone_t) conv4(sp->s_native, (long) zone);
}

@@ -220,7 +220,7 @@ struct super_block *sb; /* superblock of device block resides on */
 */
  unsigned int i;
  for(i = 0; i < V2_INDIRECTS(sb->s_block_size); i++)
	if( bp->b_v2_ind[i] != NO_ZONE)
	if( b_v2_ind(bp)[i] != NO_ZONE)
		return(0);

  return(1);

@@ -329,9 +329,9 @@ void zero_block(bp)
register struct buf *bp;	/* pointer to buffer to zero */
{
/* Zero a block. */
  ASSERT(bp->b_bytes > 0);
  ASSERT(bp->bp);
  memset(bp->b_data, 0, (size_t) bp->b_bytes);
  ASSERT(lmfs_bytes(bp) > 0);
  ASSERT(bp->data);
  memset(b_data(bp), 0, (size_t) lmfs_bytes(bp));
  MARKDIRTY(bp);
}