Merge branch 'next'
Robbert van Renesse committed Nov 16, 2023
2 parents 8253a0e + c3d57a3 commit c478874
Showing 14 changed files with 334 additions and 201 deletions.
10 changes: 6 additions & 4 deletions code/disk.hny
@@ -1,13 +1,15 @@
 from alloc import malloc
 
-def disk_init(n_blocks) returns disk:
+const BITS_PER_BLOCK = 4
+
+def new(n_blocks) returns disk:
     disk = malloc([ None, ] * n_blocks)
 
-def disk_getsize(disk) returns size:
+def getsize(disk) returns size:
     size = len !disk
 
-def disk_read(disk, bno) returns block:
+def read(disk, bno) returns block:
     block = (!disk)[bno]
 
-def disk_write(disk, bno, block):
+def write(disk, bno, block):
     (!disk)[bno] = block
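
The renaming above drops the disk_ prefixes in favor of qualified names (disk.new, disk.read, and so on), which is how fs.hny below now calls the module. A minimal sketch of the resulting interface, not part of this commit (the block number and contents are illustrative):

import disk

# Sketch only: exercise the renamed disk interface.
let d = disk.new(4):                  # a fresh 4-block disk
    assert disk.getsize(d) == 4
    disk.write(d, 2, "hello")         # a block can hold any Harmony value
    assert disk.read(d, 2) == "hello"
    assert disk.read(d, 3) == None    # unwritten blocks read as None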
173 changes: 80 additions & 93 deletions code/fs.hny
Expand Up @@ -2,84 +2,60 @@ from synch import * # shared queue for file server and lock for supe
from alloc import * # malloc/free
from RW import * # read/write locks for inode blocks
from list import subseq # list slicing
from disk import * # disk service
import disk # disk interface
import wal # write-ahead-log

const N_BLOCKS = 10
const N_BLOCKS = 10 # total number of disk blocks
const INODES_PER_BLOCK = 2 # number of inodes that fit in a block
const INDIR_PER_BLOCK = 4 # number of block pointers per block
const INDIR_PER_BLOCK = 4 # number of block pointers per block

# The file system consists of a superblock, an array of inode blocks, and
# the remaining blocks. The remaining blocks are dynamic and can be of
# The file system consists of a superblock, an array of bitmap blocks
# (to track the free blocks), an array of inode blocks, and the
# remaining blocks. The remaining blocks are dynamic and can be of
# the following types:
# - free: not in use
# - free: not in use (marked False in the bitmap blocks)
# - data: a data block
# - indir: an indirect block, with pointers to other blocks
# An inode has a pointer to a direct block and a pointer to an indirect block,
# so the maximum file size is 1 + INDIR_PER_BLOCK.

# Put block bno on the free list. The free list is a linked list of
# indirect blocks. The first entry in a free list block points to the
# next free list block. The other entries point to free blocks. The
# superblock points to the first free list block.
def fs_release(fs_state, bno):
acquire(?fs_state->super_lock)
var super = disk_read(fs_state->disk, 0)
if super.free == None:
disk_write(fs_state->disk, bno, [ None, ])
super.free = bno
disk_write(fs_state->disk, 0, super)
else:
let fb = disk_read(fs_state->disk, super.free):
if len(fb) == INDIR_PER_BLOCK:
# The first free list block is full
disk_write(fs_state->disk, bno, [ super.free, ])
super.free = bno
disk_write(fs_state->disk, 0, super)
else:
disk_write(fs_state->disk, super.free, fb + [ bno, ])
release(?fs_state->super_lock)

# Allocate a disk block
# Allocate a disk block. Currently uses first available strategy,
# which is not very efficient but easy. Note, this does not update the
# bitmap on disk, which is done through the WAL.
def fs_alloc(fs_state) returns bno:
acquire(?fs_state->super_lock)
var super = disk_read(fs_state->disk, 0)
if super.free == None:
bno = None
else:
let fb = disk_read(fs_state->disk, super.free):
if len(fb) == 1:
bno = super.free
super.free = fb[0]
disk_write(fs_state->disk, 0, super)
else:
bno = fb[len(fb) - 1]
disk_write(fs_state->disk, super.free, subseq(fb, 0, len(fb) - 1))
release(?fs_state->super_lock)

# Initialize the file system by writing the superblock, the i-node blocks,
# and creating the free list.
def fs_init(fs_state, n_inode_blocks):
acquire(?fs_state->bitmap_lock)
bno = fs_state->n_bitmap_blocks + len(fs_state->ib_locks)
var found = False
while not found:
bno += 1
assert bno < N_BLOCKS
found = not fs_state->bitmap[bno]
fs_state->bitmap[bno] = True
release(?fs_state->bitmap_lock)

# Initialize the file system by writing the superblock, the bitmap blocks, and
# the i-node blocks,
def fs_init(d, n_bitmap_blocks, n_inode_blocks):
# Initialize the superblock
disk_write(fs_state->disk, 0, { .n_inode_blocks: n_inode_blocks, .free: None })
disk.write(d, 0,
{ .n_bitmap_block: n_bitmap_blocks, .n_inode_blocks: n_inode_blocks })

# Initialize the bitmap blocks
for bno in { 1 .. n_bitmap_blocks }:
disk.write(d, bno, [ False, ] * disk.BITS_PER_BLOCK)

# Initialize the i-node blocks
for i in { 1 .. n_inode_blocks }:
disk_write(fs_state->disk, i, [
disk.write(d, n_bitmap_blocks + i, [
{ .direct: None, .indir: None, .size: 0 }, ] * INODES_PER_BLOCK)

# Initialize the free list
let n_disk_blocks = disk_getsize(fs_state->disk):
for i in { n_inode_blocks + 1 .. n_disk_blocks - 1 }:
fs_release(fs_state, i)

# Handle a read-only request. A read lock on i-node block ib has been acquired.
def fs_query_request(fs_state, req, ib) returns result:
# Read the inode block and extract the inode
let inode_block = disk_read(fs_state->disk, 1 + ib)
let inode_block = wal.read(fs_state->disk, 1 + fs_state->n_bitmap_blocks + ib)
let inode = inode_block[req.ino % INODES_PER_BLOCK]:
if req.type == "getsize":
result = inode.size

else:
assert req.type == "read"

@@ -88,7 +64,7 @@ def fs_query_request(fs_state, req, ib) returns result:
                 if inode.direct == None:
                     result = None
                 else:
-                    result = disk_read(fs_state->disk, inode.direct)
+                    result = wal.read(fs_state->disk, inode.direct)
 
             # Read indirectly. If there is no indirect block return None
             elif inode.indir == None:
@@ -97,57 +73,65 @@
             # Read the indirect block and get the pointer to the data block,
             # which may be None.
             else:
-                let indir = disk_read(fs_state->disk, inode.indir):
+                let indir = wal.read(fs_state->disk, inode.indir):
                     if indir[req.offset - 1] == None:
                         result = None
                     else:
-                        result = disk_read(fs_state->disk, indir[req.offset - 1])
+                        result = wal.read(fs_state->disk, indir[req.offset - 1])
 
 # Handle a write request. A write lock on i-node block ib has been acquired.
 def fs_update_request(fs_state, req, ib):
     assert req.type == "write"
 
+    var allocated = {} # set of allocated blocks (on disk bitmap not yet updated)
+    var write_set = {} # set of (block number, data) pairs to be written
+
     # Read the inode block and extract the inode
-    var inode_block = disk_read(fs_state->disk, 1 + ib)
+    var inode_block = wal.read(fs_state->disk, 1 + fs_state->n_bitmap_blocks + ib)
     var inode = inode_block[req.ino % INODES_PER_BLOCK]
 
     # Write the direct block. Allocate one if needed, and if so update
     # the inode. If not, just update the data block.
     if req.offset == 0:
         if inode.direct == None:
             inode.direct = fs_alloc(fs_state)
+            allocated |= { inode.direct }
             inode.size = max(inode.size, 1)
             inode_block[req.ino % INODES_PER_BLOCK] = inode
-            disk_write(fs_state->disk, 1 + ib, inode_block)
-        disk_write(fs_state->disk, inode.direct, req.data)
+            write_set |= { (1 + fs_state->n_bitmap_blocks + ib, inode_block) }
+        write_set |= { (inode.direct, req.data) }
 
     # Write a block indirectly
     else:
         # Allocate an indirect block first if there isn't one. Note that
         # the inode block, indirect block, and data block must all be written
         if inode.indir == None:
             inode.indir = fs_alloc(fs_state)
+            allocated |= { inode.indir }
             inode.size = max(inode.size, req.offset + 1)
             inode_block[req.ino % INODES_PER_BLOCK] = inode
-            disk_write(fs_state->disk, 1 + ib, inode_block)
+            write_set |= { (1 + fs_state->n_bitmap_blocks + ib, inode_block) }
             let bno = fs_alloc(fs_state)
             let indir = [ bno if i == (req.offset - 1) else None
                             for i in { 0 .. INODES_PER_BLOCK - 1 } ]:
-                disk_write(fs_state->disk, bno, req.data)
-                disk_write(fs_state->disk, inode.indir, indir)
+                allocated |= { bno }
+                write_set |= { (bno, req.data), (inode.indir, indir) }
 
         # Read the indirect block first. If needed allocate a data block,
         # otherwise just overwrite the existing data block.
         else:
-            var indir = disk_read(fs_state->disk, inode.indir)
+            var indir = wal.read(fs_state->disk, inode.indir)
            if indir[req.offset - 1] == None:
                 indir[req.offset - 1] = fs_alloc(fs_state)
-                disk_write(fs_state->disk, inode.indir, indir)
-            disk_write(fs_state->disk, indir[req.offset - 1], req.data)
+                allocated |= { indir[req.offset - 1] }
+                write_set |= { (inode.indir, indir) }
+            write_set |= { (indir[req.offset - 1], req.data) }
             if inode.size <= req.offset:
                 inode.size = req.offset + 1
                 inode_block[req.ino % INODES_PER_BLOCK] = inode
-                disk_write(fs_state->disk, 1 + ib, inode_block)
+                write_set |= { (1 + fs_state->n_bitmap_blocks + ib, inode_block) }
 
+    wal.update(fs_state->disk, allocated, write_set)
+
 # A worker thread handles client requests
 def fs_worker(fs_state):
@@ -165,31 +149,34 @@ def fs_worker(fs_state):
             read_release(?fs_state->ib_locks[ib])
         put(req.q, response)
 
-# The file server. Initialize the file system and spawn worker threads
-def file_server(req_q, n_inode_blocks, n_workers):
-    # The in-memory shared state of the file server consists of:
-    #    disk: the shared disk
-    #    req_q: the request queue
-    #    super_lock: lock on the superblock (and free list in particular)
-    #    ib_locks: read/write locks on inode blocks
-    let d = disk_init(N_BLOCKS)
-    let fs_state = malloc({
-            .disk: d, .req_q: req_q, .super_lock: Lock(),
-            .ib_locks: [ RWlock(), ] * n_inode_blocks }):
-
-        # Initialize the file system on disk
-        fs_init(fs_state, n_inode_blocks)
-
-        # Start worker threads to handle client requests
-        for i in { 1 .. n_workers }:
-            spawn eternal fs_worker(fs_state)
-
 #### FILE INTERFACE ####
 
-def file_init(n_files) returns fs:
-    fs = malloc(Queue())
-    spawn file_server(fs,
-        (n_files + (INODES_PER_BLOCK - 1)) / INODES_PER_BLOCK, 2)
+def file_init(n_files) returns req_q:
+    req_q = malloc(Queue())
+    let n_inode_blocks = (n_files + (INODES_PER_BLOCK - 1)) / INODES_PER_BLOCK
+    let n_workers = 2
+    let d = disk.new(N_BLOCKS)
+    let n_bitmap_blocks = (N_BLOCKS + (disk.BITS_PER_BLOCK - 1)) / disk.BITS_PER_BLOCK:
+        # Initialize the file system on disk
+        fs_init(d, n_bitmap_blocks, n_inode_blocks)
+
+        # The in-memory shared state of the file server consists of:
+        #    disk: the shared disk
+        #    req_q: the request queue
+        #    bitmap_lock: lock on the bitmap
+        #    n_bitmap_blocks: number of bitmap blocks
+        #    bitmap: tracks blocks in use
+        #    ib_locks: read/write locks on inode blocks
+        let fs_state = malloc({
+                .disk: d, .req_q: req_q, .bitmap_lock: Lock(),
+                .n_bitmap_blocks: n_bitmap_blocks,
+                .bitmap: [ i <= (1 + n_bitmap_blocks + n_inode_blocks)
+                                for i in { 0 .. N_BLOCKS - 1 } ],
+                .ib_locks: [ RWlock(), ] * n_inode_blocks }):
 
+            # Start worker threads to handle client requests
+            for i in { 1 .. n_workers }:
+                spawn eternal fs_worker(fs_state)
 
 def file_getsize(req_q, ino) returns size:
     let res_q = malloc(Queue()):
@@ -206,7 +193,7 @@ def file_read(req_q, ino, offset) returns data:
 def file_write(req_q, ino, offset, data):
     let res_q = malloc(Queue()):
         put(req_q, { .type: "write", .ino: ino, .offset: offset,
-            .data: data, .q: res_q })
+                        .data: data, .q: res_q })
         let status = get(res_q):
             assert status == "ok"
         free(res_q)
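
With the constants above (N_BLOCKS = 10, disk.BITS_PER_BLOCK = 4, INODES_PER_BLOCK = 2), a call file_init(4) yields n_inode_blocks = (4 + 1) / 2 = 2 and n_bitmap_blocks = (10 + 3) / 4 = 3: block 0 is the superblock, blocks 1-3 hold the bitmap, the next two blocks hold inodes, and the remaining blocks serve as data and indirect blocks. A sketch of a client under those assumptions, not part of this commit (the file number, offsets, and contents are illustrative):

from fs import *

fs = file_init(4)        # up to 4 files -> 2 inode blocks

def client():
    file_write(fs, 0, 0, "hello")         # offset 0 goes to the direct block
    file_write(fs, 0, 2, "world")         # offset 2 allocates the indirect block
    assert file_getsize(fs, 0) == 3       # size is the highest offset written + 1
    assert file_read(fs, 0, 2) == "world"
    assert file_read(fs, 0, 1) == None    # a hole: never written

spawn client()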
26 changes: 11 additions & 15 deletions code/locktest.hny
@@ -1,21 +1,17 @@
-import lock
+from lock import Lock, acquire, release
 
-thelock = lock.Lock()
+const NTHREADS = 5
+
+thelock = Lock()
 count = 0
 invariant 0 <= count <= 1
 
 def thread():
-    lock.acquire(?thelock)
-
-    atomically count += 1
-
-    # critical section is here
-    assert count == 1
-
-    atomically count -= 1
-
-    lock.release(?thelock)
-
-for i in {1..5}:
+    while choose { False, True }:
+        acquire(?thelock)
+        atomically count += 1
+        assert count == 1
+        atomically count -= 1
+        release(?thelock)
+
+for i in {1..NTHREADS}:
     spawn thread()
67 changes: 67 additions & 0 deletions code/paxos3.hny
@@ -0,0 +1,67 @@
+import synch
+
+const F = 1
+const N = (2*F) + 1
+const MAX_BALLOT = N
+
+channels = [[],] * N
+
+def send(p, m):
+    channels[p][len channels[p]] = m
+
+def process(self, proposal):
+    var leader_ballot = self
+    var last_accepted = ()
+    var ballot = 0
+    var phase1 = True
+    var estimate = proposal
+    var max_accepted = ()
+
+    while leader_ballot < MAX_BALLOT:
+        for i in { 0 .. N-1 }:
+            if phase1:
+                atomically send(i, { .type: "p1a", .src: self, .ballot: leader_ballot })
+            else:
+                atomically send(i, { .type: "p2a", .src: self, .ballot: leader_ballot, .value: estimate })
+
+        var responses = {}
+        while len(responses) < (N - F):
+            atomically let msgs = channels[self] when msgs != []:
+                channels[self] = []
+                for msg in msgs:
+                    if msg.type == "p1a":
+                        if msg.ballot > ballot:
+                            ballot = msg.ballot
+                            send(msg.src, { .type: "p1b", .src: self, .ballot: ballot, .last: last_accepted })
+                    elif msg.type == "p1b":
+                        if phase1 and (msg.ballot >= ballot):
+                            responses |= { msg.src }
+                            if (max_accepted == ()) or (msg.last > max_accepted):
+                                max_accepted = msg.last
+                    elif msg.type == "p2a":
+                        assert msg.ballot <= ballot, (msg.ballot, ballot)
+                        if msg.ballot == ballot:
+                            last_accepted = (ballot, msg.value)
+                            send(msg.src, { .type: "p2b", .src: self, .ballot: ballot })
+                    else:
+                        assert msg.type == "p2b"
+                        if msg.ballot >= ballot:
+                            ballot = msg.ballot
+                            if not phase1:
+                                responses |= { msg.src }
+
+        # See if my ballot succeeded
+        if ballot == leader_ballot:
+            if phase1:
+                if max_accepted != ():
+                    _, estimate = max_accepted
+                phase1 = False
+            else:
+                phase1 = True
+                leader_ballot += N
+        else:
+            phase1 = True
+            leader_ballot += N
+
+for i in { 0 .. N-1 }:
+    spawn eternal process(i, choose { "red", "blue" })
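
The ballot numbering makes ballots unique across processes: process p uses the arithmetic progression p, p + N, p + 2N, ..., so a ballot number also identifies its proposer (and with MAX_BALLOT = N, each process gives up after its first ballot). A sketch of the scheme, not part of this commit (N_DEMO and ballot are illustrative names):

const N_DEMO = 3

# Sketch only: the ballot process p would use on its r'th attempt
def ballot(p, r) returns b:
    b = p + (r * N_DEMO)

assert ballot(0, 1) == 3    # process 0 uses ballots 0, 3, 6, ...
assert ballot(2, 0) == 2    # process 2 uses ballots 2, 5, 8, ...
assert ballot(1, 2) == 7    # no two processes ever share a ballot number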