/*
 * Recursive FIFO lock
 *
 * Copyright Red Hat, Inc. 2013
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include <assert.h>
#include "qemu/rfifolock.h"

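/*
 * Note on the constructor arguments (comment added for clarity): cb and
 * opaque are stored here and later invoked from rfifolock_lock() each time a
 * waiting thread finds the lock contended, so a caller can, for example, use
 * the callback to prod the current owner into releasing the lock sooner.
 */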
void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
{
    qemu_mutex_init(&r->lock);
    r->head = 0;
    r->tail = 0;
    qemu_cond_init(&r->cond);
    r->nesting = 0;
    r->cb = cb;
    r->cb_opaque = opaque;
}

void rfifolock_destroy(RFifoLock *r)
{
    qemu_cond_destroy(&r->cond);
    qemu_mutex_destroy(&r->lock);
}

/*
 * Theory of operation:
 *
 * In order to ensure FIFO ordering, implement a ticketlock.  Threads acquiring
 * the lock enqueue themselves by incrementing the tail index.  When the lock
 * is unlocked, the head is incremented and waiting threads are notified.
 *
 * Recursive locking does not take a ticket since the head is only incremented
 * when the outermost recursive caller unlocks.
 */
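/*
 * Worked example (added illustration, not part of the original comment):
 * with head == tail == 0, threads A, B and C call rfifolock_lock() in that
 * order and take tickets 0, 1 and 2.  A's ticket matches head, so it
 * proceeds; B and C sleep on the condition variable.  When A unlocks, head
 * becomes 1 and the broadcast wakes both waiters, but only B's ticket
 * matches the new head, so C goes back to sleep until the next broadcast.
 */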
void rfifolock_lock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);

    /* Take a ticket */
    unsigned int ticket = r->tail++;

    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
        r->tail--; /* put ticket back, we're nesting */
    } else {
        while (ticket != r->head) {
            /* Invoke optional contention callback */
            if (r->cb) {
                r->cb(r->cb_opaque);
            }
            qemu_cond_wait(&r->cond, &r->lock);
        }
    }

    qemu_thread_get_self(&r->owner_thread);
    r->nesting++;
    qemu_mutex_unlock(&r->lock);
}

void rfifolock_unlock(RFifoLock *r)
{
    qemu_mutex_lock(&r->lock);
    assert(r->nesting > 0);
    assert(qemu_thread_is_self(&r->owner_thread));
    if (--r->nesting == 0) {
        r->head++;
        qemu_cond_broadcast(&r->cond);
    }
    qemu_mutex_unlock(&r->lock);
}
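
/*
 * Minimal usage sketch (added for illustration and kept out of the build;
 * only the RFifoLock API above is assumed, the function and variable names
 * below are hypothetical).  Every rfifolock_lock() call, including recursive
 * ones, must be balanced by an rfifolock_unlock() from the same thread.
 */
#if 0
static RFifoLock example_lock;

static void example_contention_cb(void *opaque)
{
    /* Invoked (with the internal mutex held) each time a waiter finds the
     * lock contended; e.g. nudge the current owner to release it sooner. */
}

static void example_setup(void)
{
    rfifolock_init(&example_lock, example_contention_cb, NULL);
}

static void example_worker(void)
{
    rfifolock_lock(&example_lock);   /* takes a ticket, waits for its turn */
    rfifolock_lock(&example_lock);   /* nested acquire by the owner, no new ticket */
    /* ... critical section ... */
    rfifolock_unlock(&example_lock); /* nesting drops to 1, lock still held */
    rfifolock_unlock(&example_lock); /* outermost unlock advances head, wakes waiters */
}
#endif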