libminixfs: add support for peeking blocks

With this change, the lmfs_get_block*(3) functions allow the caller to
specify that it only wants the block if it is in the cache or the
secondary VM cache.  If the block is not found there, the functions
return NULL.  Previously, the PREFETCH method would be used to this
end instead, which was both an abuse of that method's name and less efficient.

Change-Id: Ieb5a15b67fa25d2008a8eeef9d126ac908fc2395
This commit is contained in:
David van Moolenbroek 2015-03-27 20:00:55 +00:00
parent d75faf18d9
commit cb9453ca63
4 changed files with 52 additions and 42 deletions

View file

@@ -222,9 +222,9 @@ int opportunistic;
static long doub_ind_s;
static long triple_ind_s;
static long out_range_s;
int iomode = NORMAL;
int iomode;
if(opportunistic) iomode = PREFETCH;
iomode = opportunistic ? PEEK : NORMAL;
if (first_time) {
addr_in_block = rip->i_sp->s_block_size / BLOCK_ADDRESS_BYTES;
@@ -267,10 +267,8 @@ int opportunistic;
}
if (b == NO_BLOCK) return(NO_BLOCK);
bp = get_block(rip->i_dev, b, iomode); /* get double indirect block */
if(opportunistic && lmfs_dev(bp) == NO_DEV) {
put_block(bp, INDIRECT_BLOCK);
return NO_BLOCK;
}
if (bp == NULL)
return NO_BLOCK; /* peeking failed */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
mindex = excess / addr_in_block;
@@ -280,10 +278,8 @@ int opportunistic;
}
if (b == NO_BLOCK) return(NO_BLOCK);
bp = get_block(rip->i_dev, b, iomode); /* get single indirect block */
if(opportunistic && lmfs_dev(bp) == NO_DEV) {
put_block(bp, INDIRECT_BLOCK);
return NO_BLOCK;
}
if (bp == NULL)
return NO_BLOCK; /* peeking failed */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);

View file

@@ -228,9 +228,9 @@ int opportunistic; /* if nonzero, only use cache for metadata */
unsigned int dzones, nr_indirects;
block_t b;
unsigned long excess, zone, block_pos;
int iomode = NORMAL;
int iomode;
if(opportunistic) iomode = PREFETCH;
iomode = opportunistic ? PEEK : NORMAL;
scale = rip->i_sp->s_log_zone_size; /* for block-zone conversion */
block_pos = position/rip->i_sp->s_block_size; /* relative blk # in file */
@@ -264,10 +264,8 @@ int opportunistic; /* if nonzero, only use cache for metadata */
if ((unsigned int) index > rip->i_nindirs)
return(NO_BLOCK); /* Can't go beyond double indirects */
bp = get_block(rip->i_dev, b, iomode); /* get double indirect block */
if(opportunistic && lmfs_dev(bp) == NO_DEV) {
put_block(bp, INDIRECT_BLOCK);
return NO_BLOCK;
}
if (bp == NULL)
return NO_BLOCK; /* peeking failed */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
z = rd_indir(bp, index); /* z= zone for single*/
@@ -279,10 +277,8 @@ int opportunistic; /* if nonzero, only use cache for metadata */
if (z == NO_ZONE) return(NO_BLOCK);
b = (block_t) z << scale; /* b is blk # for single ind */
bp = get_block(rip->i_dev, b, iomode); /* get single indirect block */
if(opportunistic && lmfs_dev(bp) == NO_DEV) {
put_block(bp, INDIRECT_BLOCK);
return NO_BLOCK;
}
if (bp == NULL)
return NO_BLOCK; /* peeking failed */
z = rd_indir(bp, (int) excess); /* get block pointed to */
put_block(bp, INDIRECT_BLOCK); /* release single indir blk */
if (z == NO_ZONE) return(NO_BLOCK);

View file

@@ -42,9 +42,9 @@ void lmfs_set_blocksize(int blocksize, int major);
void lmfs_reset_rdwt_err(void);
int lmfs_rdwt_err(void);
void lmfs_buf_pool(int new_nr_bufs);
struct buf *lmfs_get_block(dev_t dev, block64_t block,int only_search);
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block,int only_search,
ino_t ino, u64_t off);
struct buf *lmfs_get_block(dev_t dev, block64_t block, int how);
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
u64_t off);
void lmfs_put_block(struct buf *bp, int block_type);
void lmfs_free_block(dev_t dev, block64_t block);
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t off);
@@ -61,6 +61,7 @@ void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used);
#define NORMAL 0 /* forces get_block to do disk read */
#define NO_READ 1 /* prevents get_block from doing disk read */
#define PREFETCH 2 /* tells get_block not to read or mark dev */
#define PEEK 3 /* returns NULL if not in cache or VM cache */
/* When a block is released, the type of usage is passed to put_block(). */
#define ONE_SHOT 0200 /* set if block not likely to be needed soon */

View file

@@ -178,9 +178,9 @@ static void lmfs_alloc_block(struct buf *bp)
/*===========================================================================*
* lmfs_get_block *
*===========================================================================*/
struct buf *lmfs_get_block(dev_t dev, block64_t block, int only_search)
struct buf *lmfs_get_block(dev_t dev, block64_t block, int how)
{
return lmfs_get_block_ino(dev, block, only_search, VMC_NO_INODE, 0);
return lmfs_get_block_ino(dev, block, how, VMC_NO_INODE, 0);
}
static void munmap_t(void *a, int len)
@@ -263,24 +263,24 @@ static struct buf *find_block(dev_t dev, block64_t block)
/*===========================================================================*
* lmfs_get_block_ino *
*===========================================================================*/
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int only_search,
ino_t ino, u64_t ino_off)
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
u64_t ino_off)
{
/* Check to see if the requested block is in the block cache. If so, return
* a pointer to it. If not, evict some other block and fetch it (unless
* 'only_search' is 1). All the blocks in the cache that are not in use
* are linked together in a chain, with 'front' pointing to the least recently
* used block and 'rear' to the most recently used block. If 'only_search' is
* 1, the block being requested will be overwritten in its entirety, so it is
* only necessary to see if it is in the cache; if it is not, any free buffer
* will do. It is not necessary to actually read the block in from disk.
* If 'only_search' is PREFETCH, the block need not be read from the disk,
* and the device is not to be marked on the block, so callers can tell if
* the block returned is valid.
* 'how' is NO_READ). All the blocks in the cache that are not in use are
* linked together in a chain, with 'front' pointing to the least recently used
* block and 'rear' to the most recently used block. If 'how' is NO_READ, the
* block being requested will be overwritten in its entirety, so it is only
* necessary to see if it is in the cache; if it is not, any free buffer will
* do. It is not necessary to actually read the block in from disk. If 'how'
* is PREFETCH, the block need not be read from the disk, and the device is not
* to be marked on the block (i.e., set to NO_DEV), so callers can tell if the
* block returned is valid. If 'how' is PEEK, the function returns the block
* if it is in the cache or could be obtained from VM, and NULL otherwise.
* In addition to the LRU chain, there is also a hash chain to link together
* blocks whose block numbers end with the same bit strings, for fast lookup.
*/
int b;
static struct buf *bp;
uint64_t dev_off;
@@ -406,21 +406,34 @@ struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int only_search,
}
bp->data = NULL;
/* The block is not in the cache, and VM does not know about it. If we were
* requested to search for the block only, we can now return failure to the
* caller. Return the block to the pool without allocating data pages, since
* these would be freed upon recycling the block anyway.
*/
if (how == PEEK) {
bp->lmfs_dev = NO_DEV;
lmfs_put_block(bp, ONE_SHOT);
return NULL;
}
/* Not in the cache; reserve memory for its contents. */
lmfs_alloc_block(bp);
assert(bp->data);
if(only_search == PREFETCH) {
if(how == PREFETCH) {
/* PREFETCH: don't do i/o. */
bp->lmfs_dev = NO_DEV;
} else if (only_search == NORMAL) {
} else if (how == NORMAL) {
read_block(bp);
} else if(only_search == NO_READ) {
} else if(how == NO_READ) {
/* This block will be overwritten by new contents. */
} else
panic("unexpected only_search value: %d", only_search);
panic("unexpected 'how' value: %d", how);
assert(bp->data);
@@ -485,8 +498,10 @@ void lmfs_put_block(
assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;
/* block has sensible content - if necesary, identify it to VM */
/* block has sensible content - if necessary, identify it to VM */
if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
assert(bp->data);
setflags = (block_type & ONE_SHOT) ? VMSF_ONCE : 0;
if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
bp->lmfs_inode_offset, &bp->lmfs_flags, fs_block_size,
@@ -689,6 +704,8 @@ void lmfs_invalidate(
register struct buf *bp;
assert(device != NO_DEV);
for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
if (bp->lmfs_dev == device) {
assert(bp->data);