# lab: lock

## Memory allocator

每个 CPU 都有一条对应的空闲内存链表,分配内存时可以减少锁冲突。
1 2 3 4 struct { struct spinlock lock ; struct run *freelist ; } kmem[NCPU];
初始化空闲内存表锁。
1 2 3 4 5 6 7 void kinit () { for (int i =0 ; i < NCPU; i++) initlock(&kmem[i].lock, "kmem" ); freerange(end, (void *)PHYSTOP); }
让 freerange 把所有空闲内存都分配给运行 freerange 的那个 CPU:freerange 调用 kfree_init 完成回收工作;由于其中要获取当前 cpuid,调用期间需要关中断。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 void kfree_init (void *pa) { push_off(); int id = cpuid(); struct run *r ; if (((uint64)pa % PGSIZE) != 0 || (char *)pa < end || (uint64)pa >= PHYSTOP) panic("kfree" ); memset (pa, 1 , PGSIZE); r = (struct run*)pa; acquire(&kmem[id].lock); r->next = kmem[id].freelist; kmem[id].freelist = r; release(&kmem[id].lock); pop_off(); }
kfree 很简单:获取当前 cpuid,把页面回收到对应 CPU 的空闲链表即可。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 void kfree (void *pa) { push_off(); int id = cpuid(); struct run *r ; if (((uint64)pa % PGSIZE) != 0 || (char *)pa < end || (uint64)pa >= PHYSTOP) panic("kfree" ); memset (pa, 1 , PGSIZE); r = (struct run*)pa; acquire(&kmem[id].lock); r->next = kmem[id].freelist; kmem[id].freelist = r; release(&kmem[id].lock); pop_off(); }
由于初始化时空闲内存全部分配给了调用 freerange 的那个 CPU,其他 CPU 分配时可能没有空闲内存可用,因此需要到别的 CPU 的空闲链表上“窃取”空闲内存。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 void *kalloc (void ) { push_off(); int id = cpuid(); struct run *r ; acquire(&kmem[id].lock); r = kmem[id].freelist; if (r) { kmem[id].freelist = r->next; release(&kmem[id].lock); } else { for (int i = 0 ; i < NCPU; i++) { if (i == id) continue ; acquire(&kmem[i].lock); r = kmem[i].freelist; if (r) { kmem[i].freelist = r->next; release(&kmem[i].lock); break ; } release(&kmem[i].lock); } release(&kmem[id].lock); } pop_off(); if (r) memset ((char *)r, 5 , PGSIZE); return (void *)r; }
Buffer cache 1 2 #define NBUCKET 13 #define NBUF (NBUCKET*3)
在 struct buf 中添加一个 timestamp 字段,记录该 buf 最后一次被释放(访问)的时间。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 struct buf { int valid; int disk; uint dev; uint blockno; struct sleeplock lock ; uint refcnt; struct buf *next ; uchar data[BSIZE]; uint timestamp; };
添加一个 hashtable[NBUCKET] 结构体数组,每个数组元素 bucket 代表一个哈希桶,每个桶都有自己的一把锁。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 struct { struct spinlock lock ; struct buf buf [NBUF ]; } bcache; struct bucket { struct spinlock lock ; struct buf head ; } hashtable[NBUCKET]; int ihash (uint blockno) { return blockno % NBUCKET; }
初始化。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 void binit (void ) { struct buf *b ; initlock(&bcache.lock, "bcache" ); for (b = bcache.buf; b < bcache.buf+NBUF; b++) { initsleeplock(&b->lock, "buffer" ); } b = bcache.buf; for (int i = 0 ; i < NBUCKET; i++) { initlock(&hashtable[i].lock,"bcache.bucket" ); for (int j = 0 ; j < NBUF / NBUCKET; j++) { b->next = hashtable[i].head.next; hashtable[i].head.next = b; b++; } } }
原始的 xv6 在每次调用 brelse、bpin、bunpin 时都会用一把大锁锁住整个 buffer cache;修改后只需要锁住 buf 所在的 bucket 即可,从而提高并行访问效率。释放一块 buf(refcnt 降为 0)时需要更新 timestamp,记录最后访问时间。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 void brelse (struct buf *b) { if (!holdingsleep(&b->lock)) panic("brelse" ); releasesleep(&b->lock); int index = ihash(b->blockno); acquire(&hashtable[index].lock); b->refcnt--; if (b->refcnt == 0 ) { b->timestamp = ticks; } release(&hashtable[index].lock); } void bpin (struct buf *b) { acquire(&hashtable[ihash(b->blockno)].lock); b->refcnt++; release(&hashtable[ihash(b->blockno)].lock); } void bunpin (struct buf *b) { acquire(&hashtable[ihash(b->blockno)].lock); b->refcnt--; release(&hashtable[ihash(b->blockno)].lock); }
重点是 bget 的实现。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 static struct buf*bget (uint dev, uint blockno) { struct buf *b ; int index = ihash(blockno); acquire(&hashtable[index].lock); for (b = hashtable[index].head.next; b; b = b->next) { if (b->dev == dev && b->blockno == blockno){ b->refcnt++; release(&hashtable[index].lock); acquiresleep(&b->lock); return b; } } uint min_stamp = (1 << 20 ); struct buf *lru_buf = 0 ; for (b = hashtable[index].head.next; b; b = b->next) { if (b->refcnt == 0 && b->timestamp < min_stamp) { lru_buf = b; min_stamp = b->timestamp; } } if (lru_buf) { goto find; } acquire(&bcache.lock); lru_buf = loop_find(lru_buf); if (lru_buf) { lru_buf->next = hashtable[index].head.next; hashtable[index].head.next = lru_buf; release(&bcache.lock); goto find; } else { panic("bget: no buffers" ); } find: lru_buf->dev = dev; lru_buf->blockno = blockno; lru_buf->valid = 0 ; lru_buf->refcnt = 1 ; release(&hashtable[ihash(blockno)].lock); acquiresleep(&lru_buf->lock); return lru_buf; } struct buf*loop_find (struct buf * lru_buf) { struct buf *b ; uint min_stamp = 1 << 20 ; for (;;) { for (b = bcache.buf; b < bcache.buf + NBUF; b++) { if (b->refcnt == 0 && b->timestamp < min_stamp) { lru_buf = b; min_stamp = b->timestamp; } } if (lru_buf) { int rbuf_index = ihash(lru_buf->blockno); acquire(&hashtable[rbuf_index].lock); if (lru_buf->refcnt != 0 ) { release(&hashtable[rbuf_index].lock); } else { struct buf *pre = &hashtable[rbuf_index].head; struct buf *p = hashtable[rbuf_index].head.next; while (p != lru_buf) { pre = pre->next; p = p->next; } pre->next = p->next; release(&hashtable[rbuf_index].lock); break ; } } } return lru_buf; }