I'm trying to create a thread-safe property wrapper. I could only think of GCD queues and semaphores as being the most Swifty and reliable way. Are semaphores just more performant?
FWIW, another option is the reader-writer pattern with a concurrent queue: reads are done synchronously and are allowed to run concurrently with respect to other reads, while writes are done asynchronously with a barrier (i.e. not concurrently with respect to any other reads or writes):
@propertyWrapper
class Atomic<Value> {
    private var value: Value
    private let queue = DispatchQueue(label: "com.domain.app.atomic", attributes: .concurrent)

    var wrappedValue: Value {
        get { queue.sync { value } }
        set { queue.async(flags: .barrier) { self.value = newValue } }
    }

    init(wrappedValue value: Value) {
        self.value = value
    }
}
Yet another is locks:
@propertyWrapper
struct Atomic<Value> {
    private var value: Value
    private var lock = NSLock()

    var wrappedValue: Value {
        get { lock.synchronized { value } }
        set { lock.synchronized { value = newValue } }
    }

    init(wrappedValue value: Value) {
        self.value = value
    }
}
where
extension NSLocking {
    func synchronized<T>(block: () throws -> T) rethrows -> T {
        lock()
        defer { unlock() }
        return try block()
    }
}
We should recognize that while these, and yours, offer atomicity, they don’t provide thread-safe interaction.
Consider this simple experiment, where we increment an integer a million times:
@Atomic var foo = 0

func threadSafetyExperiment() {
    DispatchQueue.global().async {
        DispatchQueue.concurrentPerform(iterations: 1_000_000) { _ in
            self.foo += 1
        }
        print(self.foo)
    }
}
You’d expect foo to be equal to 1,000,000, but it won’t be. That’s because the whole interaction of “retrieve the value, increment it, and save it” needs to be wrapped in a single synchronization mechanism.
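To see the race, it can help to spell out what the compound assignment does under the hood. Roughly speaking (this is an illustrative expansion, not literal compiler output):

// self.foo += 1 is effectively two separate, individually-atomic operations
// on the wrapper's synthesized backing storage _foo:
let current = _foo.wrappedValue    // 1. synchronized read
// ...another thread can read or write foo in this gap...
_foo.wrappedValue = current + 1    // 2. synchronized write

Each step is atomic on its own, but nothing prevents another thread from interleaving between them.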
So, you’re back to non-property wrapper sorts of solutions, e.g.
class Synchronized<Value> {
    private var _value: Value
    private let lock = NSLock()

    init(_ value: Value) {
        self._value = value
    }

    var value: Value {
        get { lock.synchronized { _value } }
        set { lock.synchronized { _value = newValue } }
    }

    func synchronized(block: (inout Value) -> Void) {
        lock.synchronized {
            block(&_value)
        }
    }
}
And then this works fine:
var foo = Synchronized<Int>(0)

func threadSafetyExperiment() {
    DispatchQueue.global().async {
        DispatchQueue.concurrentPerform(iterations: 1_000_000) { _ in
            self.foo.synchronized { value in
                value += 1
            }
        }
        print(self.foo.value)
    }
}
How can these be properly tested and measured to see the difference between the two implementations, and whether they even work?
A few thoughts:
I’d suggest doing far more than 1000 iterations. You want to do enough iterations that the results are measured in seconds, not milliseconds. Personally I used a million iterations.
The unit testing framework is ideal for both testing correctness and measuring performance, using the measure method (which repeats each performance test 10 times and captures the results in the unit test reports). A minimal example follows these points.
So, create a project with a unit test target (or add a unit test target to an existing project if you want), then create unit tests and execute them with Command+U.
If you edit the scheme for your target, you can choose to randomize the order of your tests, to make sure the order in which they execute doesn’t affect the performance.
I’d also make the test target use a release build to make sure you’re testing an optimized build.
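For example, here is a minimal sketch of such a test, assuming the Synchronized class shown earlier is visible to the test target (the test class and method names are just placeholders):

import XCTest
import Foundation

class SynchronizationTests: XCTestCase {
    func testSynchronizedIncrement() {
        measure {                                        // runs the block 10 times and reports timing
            let counter = Synchronized<Int>(0)
            DispatchQueue.concurrentPerform(iterations: 1_000_000) { _ in
                counter.synchronized { $0 += 1 }         // whole read-modify-write under one lock
            }
            XCTAssertEqual(counter.value, 1_000_000)     // correctness check
        }
    }
}

An equivalent test that does foo += 1 through the @Atomic wrapper should fail the assertion, which is a quick way to demonstrate the difference between atomic accessors and a properly synchronized read-modify-write.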
Here is an example of a variety of different synchronization techniques: a GCD serial queue, a concurrent queue (reader-writer), locks, unfair locks, and semaphores (a brief usage sketch follows the listing):
import Foundation
import os.lock

class SynchronizedSerial<Value> {
    private var _value: Value
    private let queue = DispatchQueue(label: "com.domain.app.atomic")

    required init(_ value: Value) {
        self._value = value
    }

    var value: Value {
        get { queue.sync { _value } }
        set { queue.async { self._value = newValue } }
    }

    func synchronized<T>(block: (inout Value) throws -> T) rethrows -> T {
        try queue.sync {
            try block(&_value)
        }
    }

    func writer(block: @escaping (inout Value) -> Void) {
        queue.async {
            block(&self._value)
        }
    }
}
class SynchronizedReaderWriter<Value> {
    private var _value: Value
    private let queue = DispatchQueue(label: "com.domain.app.atomic", attributes: .concurrent)

    required init(_ value: Value) {
        self._value = value
    }

    var value: Value {
        get { queue.sync { _value } }
        set { queue.async(flags: .barrier) { self._value = newValue } }
    }

    func synchronized<T>(block: (inout Value) throws -> T) rethrows -> T {
        try queue.sync(flags: .barrier) {
            try block(&_value)
        }
    }

    func reader<T>(block: (Value) throws -> T) rethrows -> T {
        try queue.sync {
            try block(_value)
        }
    }

    func writer(block: @escaping (inout Value) -> Void) {
        queue.async(flags: .barrier) {
            block(&self._value)
        }
    }
}
struct SynchronizedLock<Value> {
    private var _value: Value
    private let lock = NSLock()

    init(_ value: Value) {
        self._value = value
    }

    var value: Value {
        get { lock.synchronized { _value } }
        set { lock.synchronized { _value = newValue } }
    }

    mutating func synchronized<T>(block: (inout Value) throws -> T) rethrows -> T {
        try lock.synchronized {
            try block(&_value)
        }
    }
}
/// Unfair lock synchronization
///
/// - Warning: The documentation warns us: “In general, higher level synchronization primitives such as those provided by the pthread or dispatch subsystems should be preferred.”
class SynchronizedUnfairLock<Value> {
    private var _value: Value
    private var lock = os_unfair_lock()

    required init(_ value: Value) {
        self._value = value
    }

    var value: Value {
        get { synchronized { $0 } }
        set { synchronized { $0 = newValue } }
    }

    func synchronized<T>(block: (inout Value) throws -> T) rethrows -> T {
        os_unfair_lock_lock(&lock)
        defer { os_unfair_lock_unlock(&lock) }
        return try block(&_value)
    }
}
struct SynchronizedSemaphore<Value> {
    private var _value: Value
    private let semaphore = DispatchSemaphore(value: 1)

    init(_ value: Value) {
        self._value = value
    }

    var value: Value {
        get { semaphore.waitAndSignal { _value } }
        set { semaphore.waitAndSignal { _value = newValue } }
    }

    mutating func synchronized<T>(block: (inout Value) throws -> T) rethrows -> T {
        try semaphore.waitAndSignal {
            try block(&_value)
        }
    }
}
extension NSLocking {
    func synchronized<T>(block: () throws -> T) rethrows -> T {
        lock()
        defer { unlock() }
        return try block()
    }
}
extension DispatchSemaphore {
    func waitAndSignal<T>(block: () throws -> T) rethrows -> T {
        wait()
        defer { signal() }
        return try block()
    }
}
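And a quick usage sketch of the reader-writer variant above, mirroring the earlier increment experiment (note that the read-modify-write goes through writer rather than the value accessors):

let counter = SynchronizedReaderWriter<Int>(0)

DispatchQueue.concurrentPerform(iterations: 1_000_000) { _ in
    counter.writer { $0 += 1 }      // mutation runs as a barrier block
}

let total = counter.reader { $0 }   // sync read; runs after the earlier barriers drain
print(total)                        // 1,000,000

The same shape works for the other wrappers; only the synchronization primitive underneath changes, which is what the performance tests are meant to compare.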