/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/lockable.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static int in_drain_call_rcu;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the beginning of
 * update_counter_and_wait and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = qatomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}
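
/*
 * For orientation, a rough sketch of the matching read-side entry point.
 * The authoritative definition lives in include/qemu/rcu.h and may differ
 * in detail; the point is only how the per-thread ctr relates to
 * rcu_gp_ctr and rcu_gp_ongoing():
 *
 *     static inline void rcu_read_lock(void)
 *     {
 *         struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
 *
 *         if (p_rcu_reader->depth++ > 0) {
 *             return;
 *         }
 *         qatomic_set(&p_rcu_reader->ctr, qatomic_read(&rcu_gp_ctr));
 *         smp_mb_placeholder();
 *     }
 *
 * A reader's ctr is therefore 0 outside critical sections and a copy of
 * rcu_gp_ctr inside them.  rcu_gp_ongoing() thus flags exactly the
 * readers whose nonzero ctr no longer matches rcu_gp_ctr, i.e. critical
 * sections that started before the current grace period.
 */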

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
QEMU_DEFINE_CO_TLS(struct rcu_reader_data, rcu_reader)

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        QLIST_FOREACH(index, &registry, node) {
            qatomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         *
         * If this is the last iteration, this barrier also prevents
         * frees from seeping upwards, and orders the two wait phases
         * on architectures with 32-bit longs; see synchronize_rcu().
         */
        smp_mb_global();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for memory barriers here, worst of all we
                 * get some extra futex wakeups.
                 */
                qatomic_set(&index->waiting, false);
            } else if (qatomic_read(&in_drain_call_rcu)) {
                notifier_list_notify(&index->force_rcu, NULL);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * wait too much time.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* put back the reader list in the registry */
    QLIST_SWAP(&registry, &qsreaders, node);
}
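
/*
 * The wakeup that wait_for_readers() sleeps for comes from the read-side
 * exit path.  Roughly (again, the authoritative version is in
 * include/qemu/rcu.h and may differ in detail):
 *
 *     static inline void rcu_read_unlock(void)
 *     {
 *         struct rcu_reader_data *p_rcu_reader = get_ptr_rcu_reader();
 *
 *         if (--p_rcu_reader->depth > 0) {
 *             return;
 *         }
 *         smp_mb_placeholder();
 *         qatomic_set(&p_rcu_reader->ctr, 0);
 *         smp_mb_placeholder();
 *         if (qatomic_read(&p_rcu_reader->waiting)) {
 *             qatomic_set(&p_rcu_reader->waiting, false);
 *             qemu_event_set(&rcu_gp_event);
 *         }
 *     }
 *
 * The waiting flag set by the writer above is what turns an ordinary
 * rcu_read_unlock() into a qemu_event_set() that wakes the writer.
 */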

void synchronize_rcu(void)
{
    QEMU_LOCK_GUARD(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb_placeholder() in rcu_read_lock().
     *
     * Also orders write to RCU-protected pointers before
     * write to rcu_gp_ctr.
     */
    smp_mb_global();

    QEMU_LOCK_GUARD(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphases algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period.  */
            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }
}
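
/*
 * Illustrative counter values for synchronize_rcu() above (not
 * normative): with 64-bit longs rcu_gp_ctr simply advances
 * 0x1 -> 0x3 -> 0x5 -> ..., so a sleeping reader's stale ctr can never
 * become equal to a later value of the counter.  With 32-bit longs a
 * plain increment could eventually wrap around and alias such a stale
 * ctr, so the counter instead only toggles between 0x1 and 0x3 and
 * wait_for_readers() runs once for each parity.
 */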

#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;

    /*
     * Make this node the tail of the list.  The node will be
     * used by further enqueue operations, but it will not
     * be dequeued yet...
     */
    old_tail = qatomic_xchg(&tail, &node->next);

    /*
     * ... until it is pointed to from another item in the list.
     * In the meantime, try_dequeue() will find a NULL next pointer
     * and loop.
     *
     * Synchronizes with qatomic_load_acquire() in try_dequeue().
     */
    qatomic_store_release(old_tail, node);
}

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Head is only written by this thread, so no need for barriers.  */
    node = head;

    /*
     * If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    next = qatomic_load_acquire(&node->next);
    if (!next) {
        return NULL;
    }

    /*
     * Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail, because it is the first step in the enqueuing.
     * It is only the next pointers that might be inconsistent.
     */
    if (head == &dummy && qatomic_read(&tail) == &dummy.next) {
        abort();
    }

    /*
     * Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry.  */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
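
/*
 * To illustrate the transient state handled by the NULL check in
 * try_dequeue(): suppose the queue holds only the dummy node and a
 * producer is in the middle of enqueue() for some node "a" (a purely
 * hypothetical rcu_head used for this example):
 *
 *     head == &dummy, dummy.next == NULL, tail == &a.next
 *
 * The qatomic_xchg() has already advanced tail, but dummy.next is still
 * NULL because the qatomic_store_release() has not run yet.  A
 * concurrent try_dequeue() therefore sees next == NULL and returns NULL
 * instead of following a half-linked node; the consumer retries after
 * the producer publishes the pointer.
 */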

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = qatomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we must only process elements that were
         * added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = qatomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = qatomic_read(&rcu_call_count);
        }

        qatomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        bql_lock();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                bql_unlock();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                bql_lock();
            }

            n--;
            node->func(node);
        }
        bql_unlock();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    qatomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
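
/*
 * Typical usage sketch (illustrative only; "struct Foo" is hypothetical,
 * and most callers go through the call_rcu() or g_free_rcu() convenience
 * macros in include/qemu/rcu.h instead of calling call_rcu1() directly):
 *
 *     struct Foo {
 *         int payload;
 *         struct rcu_head rcu;
 *     };
 *
 *     static void foo_free_rcu(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     // after removing every globally visible reference to foo:
 *     call_rcu1(&foo->rcu, foo_free_rcu);
 *
 * The callback runs in the call_rcu thread after a grace period, i.e.
 * once every reader that might still hold a reference has left its
 * read-side critical section.
 */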

struct rcu_drain {
    struct rcu_head rcu;
    QemuEvent drain_complete_event;
};

static void drain_rcu_callback(struct rcu_head *node)
{
    struct rcu_drain *event = (struct rcu_drain *)node;
    qemu_event_set(&event->drain_complete_event);
}

/*
 * This function ensures that all pending RCU callbacks registered on
 * the current thread have finished executing.
 *
 * It drops the Big QEMU Lock during the wait so that the RCU thread
 * can process the callbacks.
 */
void drain_call_rcu(void)
{
    struct rcu_drain rcu_drain;
    bool locked = bql_locked();

    memset(&rcu_drain, 0, sizeof(struct rcu_drain));
    qemu_event_init(&rcu_drain.drain_complete_event, false);

    if (locked) {
        bql_unlock();
    }

    /*
     * RCU callbacks are invoked in the same order in which they are
     * registered, thus we can be sure that when 'drain_rcu_callback'
     * is called, all RCU callbacks that were registered on this thread
     * prior to calling this function are completed.
     *
     * Note that since we have only one global queue of RCU callbacks,
     * we also end up waiting for most of the RCU callbacks that were
     * registered on other threads, but this is a side effect that
     * should not be relied upon.
     */
    qatomic_inc(&in_drain_call_rcu);
    call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
    qemu_event_wait(&rcu_drain.drain_complete_event);
    qatomic_dec(&in_drain_call_rcu);

    if (locked) {
        bql_lock();
    }
}
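
/*
 * Sketch of a situation where drain_call_rcu() is useful (names below
 * are hypothetical): after unlinking an object, wait until its queued
 * reclamation callback has actually run before reporting completion.
 *
 *     remove_from_list(dev);
 *     call_rcu1(&dev->rcu, dev_free_rcu);
 *     drain_call_rcu();
 *     // dev_free_rcu() has finished: it was queued on this thread
 *     // before drain_call_rcu() was called.
 */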

void rcu_register_thread(void)
{
    assert(get_ptr_rcu_reader()->ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, get_ptr_rcu_reader(), node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(get_ptr_rcu_reader(), node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
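
/*
 * Sketch of how a long-lived worker thread is expected to bracket its
 * use of RCU (names are illustrative):
 *
 *     static void *my_worker_thread(void *opaque)
 *     {
 *         rcu_register_thread();
 *         while (!done) {
 *             rcu_read_lock();
 *             ... dereference RCU-protected pointers ...
 *             rcu_read_unlock();
 *         }
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 *
 * Registration must happen before the first read-side critical section;
 * unregistering removes the thread from &registry so that
 * synchronize_rcu() no longer waits for it.
 */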
387
Greg Kurzef149762021-11-09 19:35:22 +0100388void rcu_add_force_rcu_notifier(Notifier *n)
389{
390 qemu_mutex_lock(&rcu_registry_lock);
Stefan Hajnoczi17c78152022-02-22 14:01:49 +0000391 notifier_list_add(&get_ptr_rcu_reader()->force_rcu, n);
Greg Kurzef149762021-11-09 19:35:22 +0100392 qemu_mutex_unlock(&rcu_registry_lock);
393}
394
395void rcu_remove_force_rcu_notifier(Notifier *n)
396{
397 qemu_mutex_lock(&rcu_registry_lock);
398 notifier_remove(n);
399 qemu_mutex_unlock(&rcu_registry_lock);
400}

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the BQL, so the call_rcu thread
     * must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
    smp_mb_global_init();
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}