Incorporate new understanding of/with Intel SMP spec.
Dropped cmpxchg in favor of xchg, to match lecture notes. Use xchg to release lock, for future protection and to keep gcc from acting clever.
parent
9fd9f80431
commit
943fd378a1
8
TRICKS
8
TRICKS
|
|
@@ -102,5 +102,11 @@ after observing the earlier writes by CPU0.
|
||||||
So any reads in B are guaranteed to observe the
|
So any reads in B are guaranteed to observe the
|
||||||
effects of writes in A.
|
effects of writes in A.
|
||||||
|
|
||||||
Not sure about the second one yet.
|
According to the Intel manual behavior spec, the
|
||||||
|
second condition requires a serialization instruction
|
||||||
|
in release, to avoid reads in A happening after giving
|
||||||
|
up lk. No Intel SMP processor in existence actually
|
||||||
|
moves reads down after writes, but the language in
|
||||||
|
the spec allows it. There is no telling whether future
|
||||||
|
processors will need it.
|
||||||
|
|
||||||
|
|
|
||||||
3
main.c
3
main.c
|
|
@ -50,8 +50,9 @@ mpmain(void)
|
||||||
if(cpu() != mp_bcpu())
|
if(cpu() != mp_bcpu())
|
||||||
lapic_init(cpu());
|
lapic_init(cpu());
|
||||||
setupsegs(0);
|
setupsegs(0);
|
||||||
cpus[cpu()].booted = 1;
|
xchg(&cpus[cpu()].booted, 1);
|
||||||
|
|
||||||
|
cprintf("cpu%d: scheduling\n");
|
||||||
scheduler();
|
scheduler();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
2
proc.h
2
proc.h
|
|
@ -56,7 +56,7 @@ struct cpu {
|
||||||
struct context context; // Switch here to enter scheduler
|
struct context context; // Switch here to enter scheduler
|
||||||
struct taskstate ts; // Used by x86 to find stack for interrupt
|
struct taskstate ts; // Used by x86 to find stack for interrupt
|
||||||
struct segdesc gdt[NSEGS]; // x86 global descriptor table
|
struct segdesc gdt[NSEGS]; // x86 global descriptor table
|
||||||
volatile int booted; // Has the CPU started?
|
volatile uint booted; // Has the CPU started?
|
||||||
int ncli; // Depth of pushcli nesting.
|
int ncli; // Depth of pushcli nesting.
|
||||||
int intena; // Were interrupts enabled before pushcli?
|
int intena; // Were interrupts enabled before pushcli?
|
||||||
};
|
};
|
||||||
|
|
|
||||||
19
spinlock.c
19
spinlock.c
|
|
@ -10,12 +10,6 @@
|
||||||
|
|
||||||
extern int use_console_lock;
|
extern int use_console_lock;
|
||||||
|
|
||||||
// Barrier to gcc's instruction reordering.
|
|
||||||
static void inline gccbarrier(void)
|
|
||||||
{
|
|
||||||
asm volatile("" : : : "memory");
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
void
|
||||||
initlock(struct spinlock *lock, char *name)
|
initlock(struct spinlock *lock, char *name)
|
||||||
{
|
{
|
||||||
|
|
@ -35,7 +29,10 @@ acquire(struct spinlock *lock)
|
||||||
if(holding(lock))
|
if(holding(lock))
|
||||||
panic("acquire");
|
panic("acquire");
|
||||||
|
|
||||||
while(cmpxchg(0, 1, &lock->locked) == 1)
|
// The xchg is atomic.
|
||||||
|
// It also serializes, so that reads after acquire are not
|
||||||
|
// reordered before it.
|
||||||
|
while(xchg(&lock->locked, 1) == 1)
|
||||||
;
|
;
|
||||||
|
|
||||||
// Record info about lock acquisition for debugging.
|
// Record info about lock acquisition for debugging.
|
||||||
|
|
@ -56,8 +53,12 @@ release(struct spinlock *lock)
|
||||||
lock->pcs[0] = 0;
|
lock->pcs[0] = 0;
|
||||||
lock->cpu = 0xffffffff;
|
lock->cpu = 0xffffffff;
|
||||||
|
|
||||||
gccbarrier(); // Keep gcc from moving lock->locked = 0 earlier.
|
// The xchg serializes, so that reads before release are
|
||||||
lock->locked = 0;
|
// not reordered after it. (This reordering would be allowed
|
||||||
|
// by the Intel manuals, but does not happen on current
|
||||||
|
// Intel processors. The xchg being asm volatile also keeps
|
||||||
|
// gcc from delaying the above assignments.)
|
||||||
|
xchg(&lock->locked, 0);
|
||||||
|
|
||||||
popcli();
|
popcli();
|
||||||
}
|
}
|
||||||
|
|
|
||||||
29
x86.h
29
x86.h
|
|
@ -96,35 +96,16 @@ write_eflags(uint eflags)
|
||||||
asm volatile("pushl %0; popfl" : : "r" (eflags));
|
asm volatile("pushl %0; popfl" : : "r" (eflags));
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX: Kill this if not used.
|
|
||||||
static inline void
|
|
||||||
cpuid(uint info, uint *eaxp, uint *ebxp, uint *ecxp, uint *edxp)
|
|
||||||
{
|
|
||||||
uint eax, ebx, ecx, edx;
|
|
||||||
|
|
||||||
asm volatile("cpuid" :
|
|
||||||
"=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) :
|
|
||||||
"a" (info));
|
|
||||||
if(eaxp)
|
|
||||||
*eaxp = eax;
|
|
||||||
if(ebxp)
|
|
||||||
*ebxp = ebx;
|
|
||||||
if(ecxp)
|
|
||||||
*ecxp = ecx;
|
|
||||||
if(edxp)
|
|
||||||
*edxp = edx;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline uint
|
static inline uint
|
||||||
cmpxchg(uint oldval, uint newval, volatile uint* lock_addr)
|
xchg(volatile uint *addr, uint newval)
|
||||||
{
|
{
|
||||||
uint result;
|
uint result;
|
||||||
|
|
||||||
// The + in "+m" denotes a read-modify-write operand.
|
// The + in "+m" denotes a read-modify-write operand.
|
||||||
asm volatile("lock; cmpxchgl %2, %0" :
|
asm volatile("lock; xchgl %0, %1" :
|
||||||
"+m" (*lock_addr), "=a" (result) :
|
"+m" (*addr), "=a" (result) :
|
||||||
"r"(newval), "1"(oldval) :
|
"1" (newval) :
|
||||||
"cc");
|
"cc");
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue