// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

// Acquire a spin lock. Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause other CPUs to waste time
// spinning to acquire it.
// Spinlocks shouldn't be held across sleep(); for those cases, use sleeplocks.
void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store.  So lk->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  popcli();
}
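
// A usage sketch (illustrative; ptable.lock and p->state come from
// xv6's proc.c, not this file): callers bracket access to shared data
// with acquire/release on the lock that guards it.
//
//   acquire(&ptable.lock);
//   p->state = RUNNABLE;    // modify data guarded by ptable.lock
//   release(&ptable.lock);
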
// Record the current call stack in pcs[] by following the %ebp chain.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  // v is the address of the caller's first argument;
  // the saved %ebp sits two words below it on the stack.
  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    if(ebp == 0 || ebp < (uint*)KERNBASE || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  for(; i < 10; i++)
    pcs[i] = 0;
}

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}

// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli.  Also, if interrupts
// are off, then pushcli, popcli leaves them off.

void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(cpu->ncli++ == 0)
    cpu->intena = eflags & FL_IF; // were interrupts on before the outermost pushcli?
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpu->ncli < 0)
    panic("popcli");
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}
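
// A sketch of the matched behavior described above (hypothetical
// call sequence, not code from this file):
//
//   pushcli();   // interrupts off; ncli 0 -> 1, intena records prior state
//   pushcli();   // nested: ncli 1 -> 2, interrupts stay off
//   popcli();    // ncli 2 -> 1, interrupts stay off
//   popcli();    // ncli 1 -> 0; sti() only if intena was set
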
void
initsleeplock(struct sleeplock *l)
{
  l->locked = 0;
}

// Grab the sleeplock that is protected by spinl. Sleeplocks allow a process
// to lock a data structure for a long time, including across sleeps. A
// process that tries to acquire a sleeplock is put to sleep while another
// process holds it. To update the status of the sleeplock atomically, the
// caller must hold spinl.
void
acquire_sleeplock(struct sleeplock *sleepl, struct spinlock *spinl)
{
  while(sleepl->locked){
    sleep(sleepl, spinl); // releases spinl while asleep; reacquires it before returning
  }
  sleepl->locked = 1;
}
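
// A usage sketch (hypothetical names; the pattern, not code from this
// file): guard the sleeplock with its spinlock, then drop the spinlock
// for the long operation.
//
//   acquire(&spinl);
//   acquire_sleeplock(&sleepl, &spinl);
//   release(&spinl);
//   ...long operation that may sleep...
//   acquire(&spinl);
//   release_sleeplock(&sleepl);
//   release(&spinl);
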
// Release the sleeplock that is protected by a spinlock.
// The caller must hold the spinlock that protects the sleeplock.
void
release_sleeplock(struct sleeplock *sleepl)
{
  sleepl->locked = 0;
  wakeup(sleepl); // wake any process sleeping in acquire_sleeplock
}

// Is the sleeplock acquired?
// The caller must hold the spinlock that protects the sleeplock.
int
acquired_sleeplock(struct sleeplock *sleepl)
{
  return sleepl->locked;
}