diff --git a/lib/libminixfs/cache.c b/lib/libminixfs/cache.c
index 008ea8d09..02d496aa9 100644
--- a/lib/libminixfs/cache.c
+++ b/lib/libminixfs/cache.c
@@ -619,6 +619,23 @@ void lmfs_rw_scattered(
   static iovec_t iovec[NR_IOREQS];
   u64_t pos;
   int iov_per_block;
+  int start_in_use = bufs_in_use, start_bufqsize = bufqsize;
+
+  assert(bufqsize >= 0);
+  if(bufqsize == 0) return;
+
+  /* for READING, check all buffers on the list are obtained and held
+   * (count > 0)
+   */
+  if (rw_flag == READING) {
+	for(i = 0; i < bufqsize; i++) {
+		assert(bufq[i] != NULL);
+		assert(bufq[i]->lmfs_count > 0);
+	}
+
+	/* therefore they are all 'in use' and must be at least this many */
+	assert(start_in_use >= start_bufqsize);
+  }
 
   assert(dev != NO_DEV);
   assert(!(fs_block_size % PAGE_SIZE));
@@ -700,8 +717,10 @@ void lmfs_rw_scattered(
 		}
 		r -= fs_block_size;
 	}
-	bufq += nblocks;
-	bufqsize -= nblocks;
+
+	bufq += i;
+	bufqsize -= i;
+
 	if (rw_flag == READING) {
 		/* Don't bother reading more than the device is willing to
 		 * give at this time. Don't forget to release those extras.
@@ -720,6 +739,13 @@ void lmfs_rw_scattered(
 			break;
 		}
   }
+
+  if(rw_flag == READING) {
+	assert(start_in_use >= start_bufqsize);
+
+	/* READING callers assume all bufs are released. */
+	assert(start_in_use - start_bufqsize == bufs_in_use);
+  }
 }
 
 /*===========================================================================*
diff --git a/servers/mfs/read.c b/servers/mfs/read.c
index a0414668f..fa41d27b8 100644
--- a/servers/mfs/read.c
+++ b/servers/mfs/read.c
@@ -297,7 +297,7 @@ int *completed; /* number of bytes copied */
   }
 
   /* In all cases, bp now points to a valid buffer. */
-  assert(bp);
+  assert(bp != NULL);
 
   if (rw_flag == WRITING && chunk != block_size && !block_spec &&
       (off_t) ex64lo(position) >= rip->i_size && off == 0) {
@@ -477,6 +477,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
   static unsigned int readqsize = 0;
   static struct buf **read_q;
   u64_t position_running;
+  int inuse_before = lmfs_bufs_in_use();
 
   if(readqsize != nr_bufs) {
 	if(readqsize > 0) {
@@ -512,6 +513,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
 
   bp = lmfs_get_block_ino(dev, block, PREFETCH, rip->i_num, position);
   assert(bp != NULL);
+  assert(bp->lmfs_count > 0);
   if (lmfs_dev(bp) != NO_DEV) return(bp);
 
   /* The best guess for the number of blocks to prefetch: A lot.
@@ -567,6 +569,7 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
   /* Acquire block buffers. */
   for (;;) {
 	block_t thisblock;
+	assert(bp->lmfs_count > 0);
 	read_q[read_q_size++] = bp;
 
 	if (--blocks_ahead == 0) break;
@@ -583,6 +586,8 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
 	} else {
 		bp = get_block(dev, block, PREFETCH);
 	}
+	assert(bp);
+	assert(bp->lmfs_count > 0);
 	if (lmfs_dev(bp) != NO_DEV) {
 		/* Oops, block already in the cache, get out. */
 		put_block(bp, FULL_DATA_BLOCK);
@@ -591,6 +596,8 @@ unsigned bytes_ahead; /* bytes beyond position for immediate use */
   }
 
   lmfs_rw_scattered(dev, read_q, read_q_size, READING);
+  assert(inuse_before == lmfs_bufs_in_use());
+
   if(block_spec) return get_block(dev, baseblock, NORMAL);
   return(lmfs_get_block_ino(dev, baseblock, NORMAL, rip->i_num, position));
 }
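
The invariant these assertions encode is worth spelling out. For READING, the caller of lmfs_rw_scattered() passes in buffers it has obtained and still holds (lmfs_count > 0), and the function releases every buffer on the queue before returning, whether or not the device delivered its data; the global in-use count must therefore drop by exactly the queue size. The standalone sketch below (not MINIX code; toy_buf, toy_get, toy_put, and toy_read_scattered are hypothetical names) demonstrates the same reference-count accounting that the start_in_use/start_bufqsize asserts check:

/* Minimal sketch of the accounting invariant asserted by the patch.
 * A buffer is "in use" while its reference count is nonzero; a READING
 * scatter call takes held buffers and releases all of them.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NBUFS 8

struct toy_buf {
	int count;		/* references held by callers */
};

static struct toy_buf pool[NBUFS];
static int bufs_in_use;		/* number of buffers with count > 0 */

static struct toy_buf *toy_get(int i)
{
	if (pool[i].count++ == 0)
		bufs_in_use++;	/* 0 -> 1: buffer becomes "in use" */
	return &pool[i];
}

static void toy_put(struct toy_buf *bp)
{
	assert(bp->count > 0);
	if (--bp->count == 0)
		bufs_in_use--;	/* 1 -> 0: buffer no longer in use */
}

/* Analogue of lmfs_rw_scattered(..., READING): every queued buffer must
 * be held on entry, and all of them are released before returning.
 */
static void toy_read_scattered(struct toy_buf **bufq, int bufqsize)
{
	int i;
	int start_in_use = bufs_in_use, start_bufqsize = bufqsize;

	for (i = 0; i < bufqsize; i++) {
		assert(bufq[i] != NULL);
		assert(bufq[i]->count > 0);
	}
	assert(start_in_use >= start_bufqsize);

	/* ... device I/O would happen here ... */

	for (i = 0; i < bufqsize; i++)
		toy_put(bufq[i]);

	/* The accounting identity the patch checks on exit. */
	assert(start_in_use - start_bufqsize == bufs_in_use);
}

int main(void)
{
	struct toy_buf *q[3];
	int i;

	for (i = 0; i < 3; i++)
		q[i] = toy_get(i);	/* caller holds each buffer */

	toy_read_scattered(q, 3);
	printf("bufs_in_use after read: %d\n", bufs_in_use);	/* prints 0 */
	return 0;
}

Under this accounting, the matching check added to rahead(), assert(inuse_before == lmfs_bufs_in_use()), holds because every buffer placed on read_q was acquired once by the prefetch loop and released exactly once by lmfs_rw_scattered().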