/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor. The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed). The consistency is achieved with
 * aio_context_acquire/release. These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer. These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

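/* Add @job to the transaction @txn: the job is linked into the transaction's
 * job list and takes a reference to it.  A NULL @txn is a no-op. */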
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

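/* Drop a reference to @job.  When the last reference goes away, the job is
 * detached from its BlockDriverState and AioContext notifiers, its graph
 * nodes are released, and the job itself is freed. */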
void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy, this kicks it into its next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_lookup[job->driver->job_type],
                           job->id);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .stay_at_node    = true,
};

static void block_job_drained_begin(void *opaque)
{
    BlockJob *job = opaque;
    block_job_pause(job);
}

static void block_job_drained_end(void *opaque)
{
    BlockJob *job = opaque;
    block_job_resume(job);
}

static const BlockDevOps block_job_dev_ops = {
    .drained_begin = block_job_drained_begin,
    .drained_end = block_job_drained_end,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

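/* Attach @bs to @job as a new child with the given permissions.  On success
 * the node is recorded in job->nodes, referenced, and blocked against
 * conflicting operations; on failure @errp is set and -EPERM is returned. */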
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

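/* Start a job that was created in the paused state: create its coroutine,
 * drop the implicit pause taken at creation time, mark the job busy and
 * enter the coroutine in the AioContext of the job's BlockBackend. */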
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

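/* Finalize one completed job: run the driver's commit or abort callback
 * depending on job->ret, then the clean callback, invoke the completion
 * callback, emit the completion or cancellation QMP event if the job had
 * started, unlink the job from its transaction, and drop its reference. */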
static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

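/* Mark @job as cancelled without entering it: reset the iostatus, undo any
 * user pause that is in effect, and set job->cancelled.  The caller is
 * responsible for kicking the job coroutine if needed. */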
static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

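/* Run @finish on @job (if given) and wait synchronously for the job to
 * complete, draining it until it defers to the main loop.  Returns -EBUSY
 * if @finish fails, -ECANCELED if the job was cancelled without an error,
 * and otherwise the job's return value. */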
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

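/* Abort the whole transaction after one of its jobs has failed or been
 * cancelled: mark the remaining jobs as cancelled, synchronously finish any
 * job that has not completed yet, and finalize each of them in turn. */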
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

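/* Request cancellation of @job.  A started job is flagged as cancelled and
 * kicked so it reaches its next cancellation check; a job that was never
 * started is completed immediately with -ECANCELED. */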
void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = job->busy;
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer. These functions are
 * declared in blockjob_int.h.
 */

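/* Create a new block job on @bs using @driver.  The job is created paused
 * (pause_count == 1) with its own BlockBackend on the node and does not run
 * until block_job_start() is called.
 *
 * Minimal usage sketch (my_driver, cb and opaque are illustrative only;
 * speed 0 skips block_job_set_speed and flags 0 creates a regular,
 * non-internal job):
 *
 *     job = block_job_create("job0", &my_driver, bs, perm, shared_perm,
 *                            0, 0, cb, opaque, errp);
 *     if (job) {
 *         block_job_start(job);    // enters my_driver.start() as a coroutine
 *     }
 */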
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    blk_set_dev_ops(blk, &block_job_dev_ops, job);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

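/* Record the final return value of @job and finalize it.  Outside of a
 * transaction the job is completed immediately; inside one, failure or
 * cancellation aborts the whole transaction, while success commits it once
 * all member jobs have finished. */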
void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

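/* Called from the job coroutine at points where it is safe to pause.  If a
 * pause has been requested and the job is not cancelled, the driver's pause
 * and resume callbacks are invoked around a yield back to whoever resumes
 * the job. */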
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        job->busy = false;
        qemu_coroutine_yield(); /* wait for block_job_resume() */
        job->busy = true;
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

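/* Kick a started job coroutine so that it runs again.  Jobs that have not
 * started, have deferred to the main loop, or are currently busy are left
 * alone. */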
void block_job_enter(BlockJob *job)
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    if (!job->busy) {
        bdrv_coroutine_enter(blk_bs(job->blk), job->co);
    }
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        qemu_coroutine_yield();
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

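/* Map an I/O error to a BlockErrorAction according to @on_err, emit the
 * BLOCK_JOB_ERROR event for user-visible jobs, and on a STOP action pause
 * the job as if the user had requested it, recording the error in the job's
 * iostatus. */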
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Make the pause user visible; the job will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

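/* Schedule @fn to run in the main loop, with the job's AioContext acquired,
 * via a one-shot bottom half.  Job coroutines use this to finish up in the
 * main thread; once deferred_to_main_loop is set, block_job_enter() no
 * longer re-enters the coroutine. */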
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}