const std = @import("index.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
const SpinLock = std.SpinLock;
const linux = std.os.linux;
/// Lock may be held only once. If the same thread
/// tries to acquire the same mutex twice, it deadlocks.
/// On Linux this is a futex-based lock (no syscall on the
/// uncontended path); on other targets it falls back to a spin lock.
pub const Mutex = struct {
    /// Futex word used on Linux. States:
    /// 0: unlocked
    /// 1: locked, no waiters
    /// 2: locked, one or more waiters
    linux_lock: @typeOf(linux_lock_init),

    /// Fallback lock for non-Linux targets.
    /// TODO better implementation than spin lock
    spin_lock: @typeOf(spin_lock_init),

    // On the target where a field is unused its initializer is the void
    // value `{}`, so the unused field is zero-size and only one of the
    // two locks occupies space in the struct.
    const linux_lock_init = if (builtin.os == builtin.Os.linux) i32(0) else {};
    const spin_lock_init = if (builtin.os != builtin.Os.linux) SpinLock.init() else {};

    /// Token proving the mutex is held; call release() exactly once.
    pub const Held = struct {
        mutex: *Mutex,

        /// Unlock the mutex, waking one waiter if any are blocked.
        pub fn release(self: Held) void {
            if (builtin.os == builtin.Os.linux) {
                // Always unlock. If the previous state was Locked-No-Waiters, then we're done.
                // Otherwise, wake a waiter up.
                const prev = @atomicRmw(i32, &self.mutex.linux_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.Release);
                if (prev != 1) {
                    // release() on an unlocked mutex (prev == 0) is a usage bug.
                    assert(prev == 2);
                    const rc = linux.futex_wake(&self.mutex.linux_lock, linux.FUTEX_WAKE, 1);
                    switch (linux.getErrno(rc)) {
                        0 => {},
                        // EINVAL would mean a bad futex address — impossible here.
                        linux.EINVAL => unreachable,
                        else => unreachable,
                    }
                }
            } else {
                SpinLock.Held.release(SpinLock.Held{ .spinlock = &self.mutex.spin_lock });
            }
        }
    };

    /// Create an unlocked mutex. Needs no deinitialization.
    pub fn init() Mutex {
        return Mutex{
            .linux_lock = linux_lock_init,
            .spin_lock = spin_lock_init,
        };
    }

    /// Block until the mutex is acquired. The returned Held must be
    /// released exactly once.
    pub fn acquire(self: *Mutex) Held {
        if (builtin.os == builtin.Os.linux) {
            // First try to go from Unlocked to Locked-No-Waiters. If this succeeds, no syscalls are needed.
            // Otherwise, we need to be in the Locked-With-Waiters state. If we are already in that state,
            // proceed to futex_wait. Otherwise, try to go from Locked-No-Waiters to Locked-With-Waiters.
            // If that succeeds, proceed to futex_wait. Otherwise start the whole loop over again.
            // (cmpxchgWeak may fail spuriously; the loop retries in that case too.)
            while (@cmpxchgWeak(i32, &self.linux_lock, 0, 1, AtomicOrder.Acquire, AtomicOrder.Monotonic)) |l| {
                if (l == 2 or
                    @cmpxchgWeak(i32, &self.linux_lock, 1, 2, AtomicOrder.Acquire, AtomicOrder.Monotonic) == null)
                {
                    // Sleep only while the word still reads 2; a changed value
                    // makes the kernel return EAGAIN and we re-check.
                    const rc = linux.futex_wait(&self.linux_lock, linux.FUTEX_WAIT, 2, null);
                    switch (linux.getErrno(rc)) {
                        0, linux.EINTR, linux.EAGAIN => continue,
                        linux.EINVAL => unreachable,
                        else => unreachable,
                    }
                }
            }
        } else {
            _ = self.spin_lock.acquire();
        }
        return Held{ .mutex = self };
    }
};