libminixfs: miscellaneous API cleanup

Mostly removal of unused parameters from libminixfs calls; the ext2, mfs, and isofs servers are updated to match.
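
The net effect on the public libminixfs interface is summarized by the following before/after sketch of the affected declarations (abridged from the lmfs.h hunk further down; only the declarations touched by this change are shown):

/* Old declarations (removed): */
void lmfs_put_block(struct buf *bp, int block_type);
void lmfs_set_blocksize(int blocksize, int major);
void lmfs_blockschange(dev_t dev, int delta);
void lmfs_cache_reevaluate(dev_t dev);
int lmfs_bytes(struct buf *bp);
void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used);

/* New declarations: */
void lmfs_put_block(struct buf *bp);
void lmfs_set_blocksize(int blocksize);
void lmfs_blockschange(int delta);
void lmfs_cache_reevaluate(void);
void fs_blockstats(u64_t *blocks, u64_t *free);
/* lmfs_bytes() is gone; callers use lmfs_fs_block_size() instead. */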

Change-Id: I0eb7b568265d1669492d958e78b9e69d7cf6fc05
David van Moolenbroek 2015-03-28 00:45:28 +00:00
parent b8f6d4a649
commit 0314acfb2d
34 changed files with 155 additions and 202 deletions

View file

@ -228,11 +228,11 @@ struct inode *rip; /* used for preallocation */
rip->i_prealloc_count = EXT2_PREALLOC_BLOCKS - 1;
lmfs_markdirty(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
gd->free_blocks_count -= EXT2_PREALLOC_BLOCKS;
sp->s_free_blocks_count -= EXT2_PREALLOC_BLOCKS;
lmfs_blockschange(sp->s_dev, -EXT2_PREALLOC_BLOCKS);
lmfs_blockschange(-EXT2_PREALLOC_BLOCKS);
group_descriptors_dirty = 1;
return block;
}
@ -253,11 +253,11 @@ struct inode *rip; /* used for preallocation */
check_block_number(block, sp, gd);
lmfs_markdirty(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
gd->free_blocks_count--;
sp->s_free_blocks_count--;
lmfs_blockschange(sp->s_dev, -1);
lmfs_blockschange(-1);
group_descriptors_dirty = 1;
if (update_bsearch && block != -1 && block != NO_BLOCK) {
@ -319,11 +319,11 @@ void free_block(struct super_block *sp, bit_t bit_returned)
panic("Tried to free unused block %d", bit_returned);
lmfs_markdirty(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
gd->free_blocks_count++;
sp->s_free_blocks_count++;
lmfs_blockschange(sp->s_dev, 1);
lmfs_blockschange(1);
group_descriptors_dirty = 1;

View file

@ -1,31 +1,12 @@
/* Buffer (block) cache. To acquire a block, a routine calls get_block(),
* telling which block it wants. The block is then regarded as "in use"
* and has its 'b_count' field incremented. All the blocks that are not
* in use are chained together in an LRU list, with 'front' pointing
* to the least recently used block, and 'rear' to the most recently used
* block. A reverse chain, using the field b_prev is also maintained.
* Usage for LRU is measured by the time the put_block() is done. The second
* parameter to put_block() can violate the LRU order and put a block on the
* front of the list, if it will probably not be needed soon. If a block
* is modified, the modifying routine must set b_dirt to DIRTY, so the block
* will eventually be rewritten to the disk.
*/
#ifndef EXT2_BUF_H
#define EXT2_BUF_H
#include <sys/dirent.h>
union fsdata_u {
char b__data[1]; /* ordinary user data */
/* indirect block */
block_t b__ind[1];
/* bit map block */
bitchunk_t b__bitmap[1];
char b__data[1]; /* ordinary user data */
block_t b__ind[1]; /* indirect block */
bitchunk_t b__bitmap[1]; /* bit map block */
};
/* A block is free if b_dev == NO_DEV. */
/* These defs make it possible to use to bp->b_data instead of bp->b.b__data */
#define b_data(bp) ((union fsdata_u *) bp->data)->b__data
#define b_ind(bp) ((union fsdata_u *) bp->data)->b__ind

View file

@ -181,7 +181,7 @@ int is_dir; /* inode will be a directory if it is TRUE */
}
lmfs_markdirty(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
gd->free_inodes_count--;
sp->s_free_inodes_count--;
@ -233,7 +233,7 @@ static void free_inode_bit(struct super_block *sp, bit_t bit_returned,
panic("Tried to free unused inode %d", bit_returned);
lmfs_markdirty(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
gd->free_inodes_count++;
sp->s_free_inodes_count++;

View file

@ -341,7 +341,7 @@ void rw_inode(
icopy(rip, dip, rw_flag, TRUE);
put_block(bp, INODE_BLOCK);
put_block(bp);
rip->i_dirt = IN_CLEAN;
}

View file

@ -181,7 +181,7 @@ ssize_t fs_rdlink(ino_t ino_nr, struct fsdriver_data *data, size_t bytes)
if (bytes > rip->i_size)
bytes = rip->i_size;
r = fsdriver_copyout(data, 0, link_text, bytes);
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
if (r == OK)
r = bytes;
}
@ -652,5 +652,5 @@ off_t len;
panic("zeroblock_range: len too long: %lld", len);
memset(b_data(bp) + offset, 0, len);
lmfs_markdirty(bp);
put_block(bp, FULL_DATA_BLOCK);
put_block(bp);
}

View file

@ -88,7 +88,7 @@ int fs_mount(dev_t dev, unsigned int flags, struct fsdriver_node *root_node,
return(EINVAL);
}
lmfs_set_blocksize(superblock->s_block_size, major(fs_dev));
lmfs_set_blocksize(superblock->s_block_size);
/* Get the root inode of the mounted file system. */
if ( (root_ip = get_inode(fs_dev, ROOT_INODE)) == NULL) {

View file

@ -185,7 +185,7 @@ int fs_slink(ino_t dir_nr, char *name, uid_t uid, gid_t gid,
}
}
put_block(bp, DIRECTORY_BLOCK); /* put_block() accepts NULL. */
put_block(bp); /* put_block() accepts NULL. */
if(r != OK) {
sip->i_links_count = NO_LINK;

View file

@ -216,7 +216,7 @@ int ftype; /* used when ENTER and INCOMPAT_FILETYPE */
*numb = (ino_t) conv4(le_CPU, dp->d_ino);
}
assert(lmfs_dev(bp) != NO_DEV);
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
return(r);
}
@ -252,7 +252,7 @@ int ftype; /* used when ENTER and INCOMPAT_FILETYPE */
/* The whole block has been searched or ENTER has a free slot. */
assert(lmfs_dev(bp) != NO_DEV);
if (e_hit) break; /* e_hit set if ENTER can be performed now */
put_block(bp, DIRECTORY_BLOCK); /* otherwise, continue searching dir */
put_block(bp); /* otherwise, continue searching dir */
}
/* The whole directory has now been searched. */
@ -302,7 +302,7 @@ int ftype; /* used when ENTER and INCOMPAT_FILETYPE */
dp->d_file_type = EXT2_FT_UNKNOWN;
}
lmfs_markdirty(bp);
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
ldir_ptr->i_update |= CTIME | MTIME; /* mark mtime for update later */
ldir_ptr->i_dirt = IN_DIRTY;

View file

@ -2,7 +2,7 @@
#define EXT2_PROTO_H
#define get_block(d, n, t) lmfs_get_block(d, n, t)
#define put_block(n, t) lmfs_put_block(n, t)
#define put_block(n) lmfs_put_block(n)
/* Function prototypes. */
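
A typical call site under the updated macros then looks like this hypothetical fragment (touch_first_byte() is an illustrative helper, not part of the tree; it assumes a valid inode and block number):

/* Read one data block of an inode, clear its first byte, and release it
 * through the one-argument put_block() defined above.
 */
static void
touch_first_byte(struct inode *rip, block_t b)
{
	struct buf *bp;

	bp = get_block(rip->i_dev, b, NORMAL);	/* expands to lmfs_get_block() */
	b_data(bp)[0] = 0;			/* modify the cached contents */
	lmfs_markdirty(bp);			/* mark for eventual writeback */
	put_block(bp);				/* no more usage-type hint */
}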

View file

@ -10,6 +10,7 @@
#include "inode.h"
#include "super.h"
#include <sys/param.h>
#include <sys/dirent.h>
#include <assert.h>
@ -193,8 +194,7 @@ int *completed; /* number of bytes copied */
lmfs_markdirty(bp);
}
n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
put_block(bp, n);
put_block(bp);
return(r);
}
@ -262,7 +262,7 @@ int opportunistic;
excess = block_pos - triple_ind_s;
mindex = excess / addr_in_block2;
b = rd_indir(bp, mindex); /* num of double ind block */
put_block(bp, INDIRECT_BLOCK); /* release triple ind block */
put_block(bp); /* release triple ind block */
excess = excess % addr_in_block2;
}
if (b == NO_BLOCK) return(NO_BLOCK);
@ -273,7 +273,7 @@ int opportunistic;
ASSERT(lmfs_dev(bp) == rip->i_dev);
mindex = excess / addr_in_block;
b = rd_indir(bp, mindex); /* num of single ind block */
put_block(bp, INDIRECT_BLOCK); /* release double ind block */
put_block(bp); /* release double ind block */
mindex = excess % addr_in_block; /* index into single ind blk */
}
if (b == NO_BLOCK) return(NO_BLOCK);
@ -284,7 +284,7 @@ int opportunistic;
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
b = rd_indir(bp, mindex);
put_block(bp, INDIRECT_BLOCK); /* release single ind block */
put_block(bp); /* release single ind block */
return(b);
}
@ -439,7 +439,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
}
if (lmfs_dev(bp) != NO_DEV) {
/* Oops, block already in the cache, get out. */
put_block(bp, FULL_DATA_BLOCK);
put_block(bp);
break;
}
}
@ -558,7 +558,7 @@ ssize_t fs_getdents(ino_t ino_nr, struct fsdriver_data *data, size_t bytes,
}
}
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
if (done)
break;
}

View file

@ -75,12 +75,11 @@ int fs_statvfs(struct statvfs *st)
/*===========================================================================*
* blockstats *
*===========================================================================*/
void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
void fs_blockstats(u64_t *blocks, u64_t *free)
{
struct super_block *sp = get_super(fs_dev);
*blocks = sp->s_blocks_count;
*free = sp->s_free_blocks_count;
*used = *blocks - *free;
}

View file

@ -131,7 +131,7 @@ int op; /* special actions */
/* Create the double indirect block. */
if ( (b2 = alloc_block(rip, rip->i_bsearch) ) == NO_BLOCK) {
/* Release triple ind blk. */
put_block(bp_tindir, INDIRECT_BLOCK);
put_block(bp_tindir);
ext2_debug("failed to allocate dblock near %d\n", rip->i_block[0]);
return(ENOSPC);
}
@ -174,8 +174,8 @@ int op; /* special actions */
if (b1 == NO_BLOCK && !(op & WMAP_FREE)) {
if ( (b1 = alloc_block(rip, rip->i_bsearch) ) == NO_BLOCK) {
/* Release dbl and triple indirect blks. */
put_block(bp_dindir, INDIRECT_BLOCK);
put_block(bp_tindir, INDIRECT_BLOCK);
put_block(bp_dindir);
put_block(bp_tindir);
ext2_debug("failed to allocate dblock near %d\n", rip->i_block[0]);
return(ENOSPC);
}
@ -228,7 +228,7 @@ int op; /* special actions */
/* b1 equals NO_BLOCK only when we are freeing up the indirect block. */
if(b1 != NO_BLOCK)
lmfs_markdirty(bp);
put_block(bp, INDIRECT_BLOCK);
put_block(bp);
}
/* If the single indirect block isn't there (or was just freed),
@ -256,8 +256,8 @@ int op; /* special actions */
rip->i_block[EXT2_TIND_BLOCK] = NO_BLOCK;
}
put_block(bp_dindir, INDIRECT_BLOCK); /* release double indirect blk */
put_block(bp_tindir, INDIRECT_BLOCK); /* release triple indirect blk */
put_block(bp_dindir); /* release double indirect blk */
put_block(bp_tindir); /* release triple indirect blk */
return(OK);
}
@ -368,8 +368,7 @@ void zero_block(bp)
register struct buf *bp; /* pointer to buffer to zero */
{
/* Zero a block. */
ASSERT(lmfs_bytes(bp) > 0);
ASSERT(bp->data);
memset(b_data(bp), 0, (size_t) lmfs_bytes(bp));
memset(b_data(bp), 0, lmfs_fs_block_size());
lmfs_markdirty(bp);
}

View file

@ -155,7 +155,7 @@ static struct buf* fetch_inode(struct dir_extent *extent, size_t *offset)
break;
}
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
bp = read_extent_block(extent, *offset /
v_pri.logical_block_size_l);
}
@ -180,7 +180,7 @@ int read_inode(struct inode *i_node, struct dir_extent *extent, size_t offset,
/* Parse basic ISO 9660 specs. */
if (check_dir_record(dir_rec,
offset % v_pri.logical_block_size_l) != OK) {
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
return EINVAL;
}
@ -199,7 +199,7 @@ int read_inode(struct inode *i_node, struct dir_extent *extent, size_t offset,
offset += dir_rec->length;
read_inode_extents(i_node, dir_rec, extent, &offset);
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
if (new_offset != NULL)
*new_offset = offset;
return OK;
@ -291,7 +291,7 @@ void read_inode_extents(struct inode *i,
if (check_dir_record(dir_rec,
*offset % v_pri.logical_block_size_l) != OK) {
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
return;
}
@ -322,7 +322,7 @@ void read_inode_extents(struct inode *i,
if ((dir_rec->file_flags & D_NOT_LAST_EXTENT) == 0)
done = TRUE;
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
}
}

View file

@ -44,7 +44,7 @@ ssize_t fs_read(ino_t ino_nr, struct fsdriver_data *data, size_t bytes,
r = fsdriver_copyout(data, cum_io, b_data(bp)+off, chunk);
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
if (r != OK)
break; /* EOF reached. */

View file

@ -26,8 +26,8 @@ int fs_statvfs(struct statvfs *st)
return OK;
}
void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
void fs_blockstats(u64_t *blocks, u64_t *free)
{
*used = *blocks = v_pri.volume_space_size_l;
*blocks = v_pri.volume_space_size_l;
*free = 0;
}

View file

@ -45,7 +45,7 @@ static int create_vol_pri_desc(struct iso9660_vol_pri_desc *vol_pri, char *buf,
(vol_pri->file_struct_ver != 1))
return EINVAL;
lmfs_set_blocksize(vol_pri->logical_block_size_l, major(fs_dev));
lmfs_set_blocksize(vol_pri->logical_block_size_l);
/* Read root directory record. */
root_record = (struct iso9660_dir_record *)vol_pri->root_directory;

View file

@ -50,7 +50,7 @@ int parse_susp(struct rrii_dir_record *dir, char *buffer)
}
parse_susp_buffer(dir, b_data(ca_bp) + ca_offset, ca_length);
lmfs_put_block(ca_bp, FULL_DATA_BLOCK);
lmfs_put_block(ca_bp);
return OK;
}

View file

@ -3,36 +3,14 @@
#include "clean.h"
/* Buffer (block) cache. To acquire a block, a routine calls get_block(),
* telling which block it wants. The block is then regarded as "in use"
* and has its 'b_count' field incremented. All the blocks that are not
* in use are chained together in an LRU list, with 'front' pointing
* to the least recently used block, and 'rear' to the most recently used
* block. A reverse chain, using the field b_prev is also maintained.
* Usage for LRU is measured by the time the put_block() is done. The second
* parameter to put_block() can violate the LRU order and put a block on the
* front of the list, if it will probably not be needed soon. If a block
* is modified, the modifying routine must set b_dirt to DIRTY, so the block
* will eventually be rewritten to the disk.
*/
#include <sys/dirent.h>
union fsdata_u {
char b__data[1]; /* ordinary user data */
/* directory block */
struct direct b__dir[1];
/* V2 indirect block */
zone_t b__v2_ind[1];
/* V2 inode block */
d2_inode b__v2_ino[1];
/* bit map block */
bitchunk_t b__bitmap[1];
char b__data[1]; /* ordinary user data */
struct direct b__dir[1]; /* directory block */
zone_t b__v2_ind[1]; /* V2 indirect block */
d2_inode b__v2_ino[1]; /* V2 inode block */
bitchunk_t b__bitmap[1]; /* bit map block */
};
/* A block is free if b_dev == NO_DEV. */
/* These defs make it possible to use to bp->b_data instead of bp->b.b__data */
#define b_data(b) ((union fsdata_u *) b->data)->b__data
#define b_dir(b) ((union fsdata_u *) b->data)->b__dir
@ -41,4 +19,3 @@ union fsdata_u {
#define b_bitmap(b) ((union fsdata_u *) b->data)->b__bitmap
#endif

View file

@ -405,7 +405,7 @@ int rw_flag; /* READING or WRITING */
assert(sp->s_version == V3);
new_icopy(rip, dip2, rw_flag, sp->s_native);
put_block(bp, INODE_BLOCK);
put_block(bp);
IN_MARKCLEAN(rip);
}

View file

@ -168,7 +168,7 @@ ssize_t fs_rdlink(ino_t ino_nr, struct fsdriver_data *data, size_t bytes)
if (bytes > rip->i_size)
bytes = rip->i_size;
r = fsdriver_copyout(data, 0, b_data(bp), bytes);
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
if (r == OK)
r = bytes;
}
@ -629,7 +629,7 @@ off_t len;
bytes = len;
memset(b_data(bp) + offset, 0, bytes);
MARKDIRTY(bp);
put_block(bp, FULL_DATA_BLOCK);
put_block(bp);
pos += bytes;
len -= bytes;

View file

@ -49,7 +49,7 @@ int fs_mount(dev_t dev, unsigned int flags, struct fsdriver_node *root_node,
printf("MFS: WARNING: FS 0x%llx unclean, mounting readonly\n", fs_dev);
}
lmfs_set_blocksize(superblock.s_block_size, major(fs_dev));
lmfs_set_blocksize(superblock.s_block_size);
/* Get the root inode of the mounted file system. */
if( (root_ip = get_inode(fs_dev, ROOT_INODE)) == NULL) {

View file

@ -170,7 +170,7 @@ int fs_slink(ino_t dir_nr, char *name, uid_t uid, gid_t gid,
}
}
put_block(bp, DIRECTORY_BLOCK); /* put_block() accepts NULL. */
put_block(bp); /* put_block() accepts NULL. */
if(r != OK) {
sip->i_nlinks = NO_LINK;

View file

@ -186,7 +186,7 @@ int flag; /* LOOK_UP, ENTER, DELETE or IS_EMPTY */
(int) dp->mfs_d_ino);
}
assert(lmfs_dev(bp) != NO_DEV);
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
return(r);
}
@ -200,7 +200,7 @@ int flag; /* LOOK_UP, ENTER, DELETE or IS_EMPTY */
/* The whole block has been searched or ENTER has a free slot. */
if (e_hit) break; /* e_hit set if ENTER can be performed now */
assert(lmfs_dev(bp) != NO_DEV);
put_block(bp, DIRECTORY_BLOCK); /* otherwise, continue searching dir */
put_block(bp); /* otherwise, continue searching dir */
}
/* The whole directory has now been searched. */
@ -231,7 +231,7 @@ int flag; /* LOOK_UP, ENTER, DELETE or IS_EMPTY */
sp = ldir_ptr->i_sp;
dp->mfs_d_ino = conv4(sp->s_native, (int) *numb);
MARKDIRTY(bp);
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
ldir_ptr->i_update |= CTIME | MTIME; /* mark mtime for update later */
IN_MARKDIRTY(ldir_ptr);
if (new_slots > old_slots) {

View file

@ -3,7 +3,7 @@
/* Some shortcuts to functions in -lminixfs */
#define get_block(d, b, t) lmfs_get_block(d, b, t)
#define put_block(b, t) lmfs_put_block(b, t)
#define put_block(b) lmfs_put_block(b)
/* Function prototypes. */

View file

@ -6,6 +6,7 @@
#include "inode.h"
#include "super.h"
#include <sys/param.h>
#include <sys/dirent.h>
#include <assert.h>
@ -203,8 +204,7 @@ int *completed; /* number of bytes copied */
MARKDIRTY(bp);
}
n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
put_block(bp, n);
put_block(bp);
return(r);
}
@ -269,7 +269,7 @@ int opportunistic; /* if nonzero, only use cache for metadata */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
z = rd_indir(bp, index); /* z= zone for single*/
put_block(bp, INDIRECT_BLOCK); /* release double ind block */
put_block(bp); /* release double ind block */
excess = excess % nr_indirects; /* index into single ind blk */
}
@ -280,7 +280,7 @@ int opportunistic; /* if nonzero, only use cache for metadata */
if (bp == NULL)
return NO_BLOCK; /* peeking failed */
z = rd_indir(bp, (int) excess); /* get block pointed to */
put_block(bp, INDIRECT_BLOCK); /* release single indir blk */
put_block(bp); /* release single indir blk */
if (z == NO_ZONE) return(NO_BLOCK);
b = (block_t) ((z << scale) + boff);
return(b);
@ -452,7 +452,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
assert(bp->lmfs_count > 0);
if (lmfs_dev(bp) != NO_DEV) {
/* Oops, block already in the cache, get out. */
put_block(bp, FULL_DATA_BLOCK);
put_block(bp);
break;
}
}
@ -554,7 +554,7 @@ ssize_t fs_getdents(ino_t ino_nr, struct fsdriver_data *data, size_t bytes,
}
}
put_block(bp, DIRECTORY_BLOCK);
put_block(bp);
if (done)
break;
}

View file

@ -84,13 +84,12 @@ int fs_statvfs(struct statvfs *st)
{
struct super_block *sp;
int scale;
u64_t used;
sp = get_super(fs_dev);
scale = sp->s_log_zone_size;
fs_blockstats(&st->f_blocks, &st->f_bfree, &used);
fs_blockstats(&st->f_blocks, &st->f_bfree);
st->f_bavail = st->f_bfree;
st->f_bsize = sp->s_block_size << scale;

View file

@ -81,18 +81,18 @@ int map; /* IMAP (inode map) or ZMAP (zone map) */
if (b >= map_bits) break;
}
put_block(bp, MAP_BLOCK);
put_block(bp);
++block;
word = 0;
} while (--bcount > 0);
return free_bits; /* no bit could be allocated */
return free_bits;
}
/*===========================================================================*
* blockstats *
*===========================================================================*/
void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
void fs_blockstats(u64_t *blocks, u64_t *free)
{
struct super_block *sp;
@ -102,8 +102,7 @@ void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
assert(!sp->s_log_zone_size);
*blocks = sp->s_zones;
*used = get_used_blocks(sp);
*free = *blocks - *used;
*free = *blocks - get_used_blocks(sp);
return;
}

View file

@ -93,14 +93,14 @@ bit_t origin; /* number of bit to start searching at */
k |= 1 << i;
*wptr = (bitchunk_t) conv4(sp->s_native, (int) k);
MARKDIRTY(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
if(map == ZMAP) {
used_blocks++;
lmfs_blockschange(sp->s_dev, 1);
lmfs_blockschange(1);
}
return(b);
}
put_block(bp, MAP_BLOCK);
put_block(bp);
if (++block >= (unsigned int) bit_blocks) /* last block, wrap around */
block = 0;
word = 0;
@ -150,11 +150,11 @@ bit_t bit_returned; /* number of bit to insert into the map */
b_bitmap(bp)[word] = (bitchunk_t) conv4(sp->s_native, (int) k);
MARKDIRTY(bp);
put_block(bp, MAP_BLOCK);
put_block(bp);
if(map == ZMAP) {
used_blocks--;
lmfs_blockschange(sp->s_dev, -1);
lmfs_blockschange(-1);
}
}
@ -238,7 +238,7 @@ static int rw_super(struct super_block *sp, int writing)
sp->s_dev = save_dev;
}
put_block(bp, FULL_DATA_BLOCK);
put_block(bp);
lmfs_flushall();
return OK;

View file

@ -125,7 +125,7 @@ int op; /* special actions */
if (bp_dindir != NULL) MARKDIRTY(bp_dindir);
if (z1 == NO_ZONE) {
/* Release dbl indirect blk. */
put_block(bp_dindir, INDIRECT_BLOCK);
put_block(bp_dindir);
return(err_code); /* couldn't create single ind */
}
}
@ -166,7 +166,7 @@ int op; /* special actions */
}
/* z1 equals NO_ZONE only when we are freeing up the indirect block. */
if(z1 != NO_ZONE) MARKDIRTY(bp);
put_block(bp, INDIRECT_BLOCK);
put_block(bp);
}
/* If the single indirect block isn't there (or was just freed),
@ -179,7 +179,7 @@ int op; /* special actions */
rip->i_zone[zones+1] = NO_ZONE;
}
put_block(bp_dindir, INDIRECT_BLOCK); /* release double indirect blk */
put_block(bp_dindir); /* release double indirect blk */
return(OK);
}
@ -311,9 +311,8 @@ void zero_block(bp)
register struct buf *bp; /* pointer to buffer to zero */
{
/* Zero a block. */
ASSERT(lmfs_bytes(bp) > 0);
ASSERT(bp->data);
memset(b_data(bp), 0, (size_t) lmfs_bytes(bp));
memset(b_data(bp), 0, lmfs_fs_block_size());
MARKDIRTY(bp);
}

View file

@ -31,31 +31,30 @@ void lmfs_markdirty(struct buf *bp);
void lmfs_markclean(struct buf *bp);
int lmfs_isclean(struct buf *bp);
dev_t lmfs_dev(struct buf *bp);
int lmfs_bytes(struct buf *bp);
int lmfs_bufs_in_use(void);
int lmfs_nr_bufs(void);
void lmfs_flushall(void);
void lmfs_flushdev(dev_t dev);
int lmfs_fs_block_size(void);
void lmfs_may_use_vmcache(int);
void lmfs_set_blocksize(int blocksize, int major);
void lmfs_reset_rdwt_err(void);
int lmfs_rdwt_err(void);
void lmfs_may_use_vmcache(int);
void lmfs_set_blocksize(int blocksize);
void lmfs_reset_rdwt_err(void);
int lmfs_rdwt_err(void);
void lmfs_buf_pool(int new_nr_bufs);
struct buf *lmfs_get_block(dev_t dev, block64_t block, int how);
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
u64_t off);
void lmfs_put_block(struct buf *bp, int block_type);
void lmfs_put_block(struct buf *bp);
void lmfs_free_block(dev_t dev, block64_t block);
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t off);
void lmfs_invalidate(dev_t device);
void lmfs_rw_scattered(dev_t, struct buf **, int, int);
void lmfs_setquiet(int q);
void lmfs_cache_reevaluate(dev_t dev);
void lmfs_blockschange(dev_t dev, int delta);
void lmfs_cache_reevaluate(void);
void lmfs_blockschange(int delta);
/* calls that libminixfs does into fs */
void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used);
void fs_blockstats(u64_t *blocks, u64_t *free);
/* get_block arguments */
#define NORMAL 0 /* forces get_block to do disk read */
@ -63,16 +62,6 @@ void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used);
#define PREFETCH 2 /* tells get_block not to read or mark dev */
#define PEEK 3 /* returns NULL if not in cache or VM cache */
/* When a block is released, the type of usage is passed to put_block(). */
#define ONE_SHOT 0200 /* set if block not likely to be needed soon */
#define INODE_BLOCK 0 /* inode block */
#define DIRECTORY_BLOCK 1 /* directory block */
#define INDIRECT_BLOCK 2 /* pointer block */
#define MAP_BLOCK 3 /* bit map */
#define FULL_DATA_BLOCK 5 /* data, fully used */
#define PARTIAL_DATA_BLOCK 6 /* data, partly used*/
#define END_OF_FILE (-104) /* eof detected */
/* Block I/O helper functions. */

View file

@ -66,7 +66,7 @@ block_prefetch(dev_t dev, block64_t block, unsigned int nblocks)
assert(bp != NULL);
if (lmfs_dev(bp) != NO_DEV) {
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
break;
}
@ -175,7 +175,7 @@ lmfs_bio(dev_t dev, struct fsdriver_data * data, size_t bytes, off_t pos,
(char *)bp->data + block_off, chunk);
}
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
if (r != OK)
break;

View file

@ -18,6 +18,22 @@
#include <minix/u64.h>
#include <minix/bdev.h>
/* Buffer (block) cache. To acquire a block, a routine calls lmfs_get_block(),
* telling which block it wants. The block is then regarded as "in use" and
* has its reference count incremented. All the blocks that are not in use are
* chained together in an LRU list, with 'front' pointing to the least recently
* used block, and 'rear' to the most recently used block. A reverse chain is
* also maintained. Usage for LRU is measured by the time the put_block() is
* done. The second parameter to put_block() can violate the LRU order and put
* a block on the front of the list, if it will probably not be needed again.
* This is used internally only; the lmfs_put_block() API call has no second
* parameter. If a block is modified, the modifying routine must mark the
* block as dirty, so the block will eventually be rewritten to the disk.
*/
/* Flags to put_block(). */
#define ONE_SHOT 0x1 /* set if block will not be needed again */
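
A minimal sketch of the external interface described above (hypothetical caller, not part of this file; assumes the usual includes and a mounted 'dev'). Internal callers may additionally pass ONE_SHOT to put_block(); external callers cannot:

void
zero_one_block(dev_t dev, block64_t block_nr)
{
	struct buf *bp;

	bp = lmfs_get_block(dev, block_nr, NORMAL);	/* acquire; refcount goes up */
	memset(bp->data, 0, lmfs_fs_block_size());	/* modify the cached data */
	lmfs_markdirty(bp);				/* will be written back later */
	lmfs_put_block(bp);				/* release; refcount goes down */
}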
#define BUFHASH(b) ((unsigned int)((b) % nr_bufs))
#define MARKCLEAN lmfs_markclean
@ -30,7 +46,8 @@ static unsigned int bufs_in_use;/* # bufs currently in use (not on free list)*/
static void rm_lru(struct buf *bp);
static void read_block(struct buf *);
static void freeblock(struct buf *bp);
static void cache_heuristic_check(int major);
static void cache_heuristic_check(void);
static void put_block(struct buf *bp, int put_flags);
static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */
@ -48,7 +65,7 @@ static int quiet = 0;
void lmfs_setquiet(int q) { quiet = q; }
static u32_t fs_bufs_heuristic(int minbufs, u32_t btotal, u64_t bfree,
int blocksize, dev_t majordev)
int blocksize)
{
struct vm_stats_info vsi;
int bufs;
@ -96,7 +113,7 @@ static u32_t fs_bufs_heuristic(int minbufs, u32_t btotal, u64_t bfree,
return bufs;
}
void lmfs_blockschange(dev_t dev, int delta)
void lmfs_blockschange(int delta)
{
/* Change the number of allocated blocks by 'delta.'
* Also accumulate the delta since the last cache re-evaluation.
@ -108,7 +125,7 @@ void lmfs_blockschange(dev_t dev, int delta)
#define BANDKB (10*1024) /* recheck cache every 10MB change */
if(bitdelta*(int)fs_block_size/1024 > BANDKB ||
bitdelta*(int)fs_block_size/1024 < -BANDKB) {
lmfs_cache_reevaluate(dev);
lmfs_cache_reevaluate();
bitdelta = 0;
}
}
@ -133,11 +150,6 @@ dev_t lmfs_dev(struct buf *bp)
return bp->lmfs_dev;
}
int lmfs_bytes(struct buf *bp)
{
return bp->lmfs_bytes;
}
static void free_unused_blocks(void)
{
struct buf *bp;
@ -414,7 +426,7 @@ struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
if (how == PEEK) {
bp->lmfs_dev = NO_DEV;
lmfs_put_block(bp, ONE_SHOT);
put_block(bp, ONE_SHOT);
return NULL;
}
@ -441,26 +453,20 @@ struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
}
/*===========================================================================*
* lmfs_put_block *
* put_block *
*===========================================================================*/
void lmfs_put_block(
struct buf *bp, /* pointer to the buffer to be released */
int block_type /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
)
static void put_block(struct buf *bp, int put_flags)
{
/* Return a block to the list of available blocks. Depending on 'block_type'
/* Return a block to the list of available blocks. Depending on 'put_flags'
* it may be put on the front or rear of the LRU chain. Blocks that are
* expected to be needed again shortly (e.g., partially full data blocks)
* go on the rear; blocks that are unlikely to be needed again shortly
* (e.g., full data blocks) go on the front. Blocks whose loss can hurt
* the integrity of the file system (e.g., inode blocks) are written to
* disk immediately if they are dirty.
* expected to be needed again at some point go on the rear; blocks that are
* unlikely to be needed again at all go on the front.
*/
dev_t dev;
uint64_t dev_off;
int r, setflags;
if (bp == NULL) return; /* it is easier to check here than in caller */
assert(bp != NULL);
dev = bp->lmfs_dev;
@ -470,8 +476,8 @@ void lmfs_put_block(
if (bp->lmfs_count != 0) return; /* block is still in use */
/* Put this block back on the LRU chain. */
if (dev == NO_DEV || dev == DEV_RAM || (block_type & ONE_SHOT)) {
/* Block probably won't be needed quickly. Put it on front of chain.
if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
/* Block will not be needed again. Put it on front of chain.
* It will be the next block to be evicted from the cache.
*/
bp->lmfs_prev = NULL;
@ -483,7 +489,7 @@ void lmfs_put_block(
front = bp;
}
else {
/* Block probably will be needed quickly. Put it on rear of chain.
/* Block may be needed again. Put it on rear of chain.
* It will not be evicted from the cache for a long time.
*/
bp->lmfs_prev = rear;
@ -502,7 +508,7 @@ void lmfs_put_block(
if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
assert(bp->data);
setflags = (block_type & ONE_SHOT) ? VMSF_ONCE : 0;
setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;
if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
bp->lmfs_inode_offset, &bp->lmfs_flags, fs_block_size,
setflags)) != OK) {
@ -522,10 +528,22 @@ void lmfs_put_block(
* after, which could be a problem if VM already forgot the block and we are
* expected to pass it to VM again, which then wouldn't happen.
*/
if (block_type & ONE_SHOT)
if (put_flags & ONE_SHOT)
bp->lmfs_dev = NO_DEV;
}
/*===========================================================================*
* lmfs_put_block *
*===========================================================================*/
void lmfs_put_block(struct buf *bp)
{
/* User interface to put_block(). */
if (bp == NULL) return; /* for poorly written file systems */
put_block(bp, 0);
}
/*===========================================================================*
* lmfs_free_block *
*===========================================================================*/
@ -621,14 +639,14 @@ void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
* TODO: tell VM that it is an all-zeroes block, so that VM can deduplicate
* all such pages in its cache.
*/
lmfs_put_block(bp, ONE_SHOT);
put_block(bp, ONE_SHOT);
}
void lmfs_cache_reevaluate(dev_t dev)
void lmfs_cache_reevaluate(void)
{
if(bufs_in_use == 0 && dev != NO_DEV) {
if (bufs_in_use == 0) {
/* if the cache isn't in use any more, we could resize it. */
cache_heuristic_check(major(dev));
cache_heuristic_check();
}
}
@ -876,7 +894,7 @@ void lmfs_rw_scattered(
}
if (rw_flag == READING) {
bp->lmfs_dev = dev; /* validate block */
lmfs_put_block(bp, PARTIAL_DATA_BLOCK);
lmfs_put_block(bp);
} else {
MARKCLEAN(bp);
}
@ -891,7 +909,7 @@ void lmfs_rw_scattered(
* give at this time. Don't forget to release those extras.
*/
while (bufqsize > 0) {
lmfs_put_block(*bufq++, PARTIAL_DATA_BLOCK);
lmfs_put_block(*bufq++);
bufqsize--;
}
}
@ -952,15 +970,14 @@ static void cache_resize(unsigned int blocksize, unsigned int bufs)
fs_block_size = blocksize;
}
static void cache_heuristic_check(int major)
static void cache_heuristic_check(void)
{
int bufs, d;
u64_t btotal, bfree, bused;
u64_t btotal, bfree;
fs_blockstats(&btotal, &bfree, &bused);
fs_blockstats(&btotal, &bfree);
bufs = fs_bufs_heuristic(10, btotal, bfree,
fs_block_size, major);
bufs = fs_bufs_heuristic(10, btotal, bfree, fs_block_size);
/* set the cache to the new heuristic size if the new one
* is more than 10% off from the current one.
@ -975,16 +992,14 @@ static void cache_heuristic_check(int major)
/*===========================================================================*
* lmfs_set_blocksize *
*===========================================================================*/
void lmfs_set_blocksize(int new_block_size, int major)
void lmfs_set_blocksize(int new_block_size)
{
cache_resize(new_block_size, MINBUFS);
cache_heuristic_check(major);
cache_heuristic_check();
/* Decide whether to use seconday cache or not.
* Only do this if
* - it's available, and
* - use of it hasn't been disabled for this fs, and
* - our main FS device isn't a memory device
* Only do this if the block size is a multiple of the page size, and using
* the VM cache has been enabled for this FS.
*/
vmcache = 0;

View file

@ -1,4 +0,0 @@
#define _SYSTEM
#include <lib.h> /* common to all libraries */
#include <minix/com.h> /* need task numbers + message types */

View file

@ -53,7 +53,7 @@ dowriteblock(int b, int blocksize, u32_t seed, char *data)
lmfs_markdirty(bp);
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
return blocksize;
}
@ -72,7 +72,7 @@ readblock(int b, int blocksize, u32_t seed, char *data)
memcpy(data, bp->data, blocksize);
lmfs_put_block(bp, FULL_DATA_BLOCK);
lmfs_put_block(bp);
return blocksize;
}
@ -91,9 +91,9 @@ void testend(void)
/* Fake some libminixfs client functions */
void
fs_blockstats(u64_t *total, u64_t *free, u64_t *used)
fs_blockstats(u64_t *total, u64_t *free)
{
*total = *free = *used = 0;
*total = *free = 0;
}
static void allocate(int b)
@ -269,7 +269,7 @@ main(int argc, char *argv[])
for(p = 1; p <= 3; p++) {
/* Do not update curblocksize until the cache is flushed. */
newblocksize = PAGE_SIZE*p;
lmfs_set_blocksize(newblocksize, MYMAJOR);
lmfs_set_blocksize(newblocksize);
curblocksize = newblocksize; /* now it's safe to update */
lmfs_buf_pool(BLOCKS);
if(dotest(curblocksize, BLOCKS, ITER)) e(n);
@ -282,7 +282,7 @@ main(int argc, char *argv[])
for(wss = 2; wss <= 3; wss++) {
int wsblocks = 10*wss*wss*wss*wss*wss;
for(cs = wsblocks/4; cs <= wsblocks*3; cs *= 1.5) {
lmfs_set_blocksize(PAGE_SIZE, MYMAJOR);
lmfs_set_blocksize(PAGE_SIZE);
curblocksize = PAGE_SIZE; /* same as above */
lmfs_buf_pool(cs);
if(dotest(curblocksize, wsblocks, ITER)) e(n);