job: Add job_yield()
This moves block_job_yield() to the Job layer. Now that blockjob.c no
longer needs them, job_do_yield() and job_should_pause() can be dropped
from the public Job interface and become static in job.c.
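
Callers keep the same pattern as before, just against Job instead of
BlockJob. As a minimal sketch (assuming a hypothetical job-specific
'done' flag, not part of this patch), a job coroutine waits like this:

    while (!job_is_cancelled(job) && !done) {
        /* Yield until job_enter() re-enters the coroutine. job_yield()
         * returns immediately if the job is already cancelled and
         * honours pause requests via job_pause_point(). */
        job_yield(job);
    }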
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
diff --git a/block/backup.c b/block/backup.c
index b13f91d..6f4f3df 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -444,7 +444,7 @@
while (!job_is_cancelled(&job->common.job)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
- block_job_yield(&job->common);
+ job_yield(&job->common.job);
}
} else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
ret = backup_run_incremental(job);
diff --git a/block/mirror.c b/block/mirror.c
index c63cf7c..687f955 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -731,7 +731,7 @@
block_job_event_ready(&s->common);
s->synced = true;
while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
- block_job_yield(&s->common);
+ job_yield(&s->common.job);
}
s->common.job.cancelled = false;
goto immediate_exit;
diff --git a/blockjob.c b/blockjob.c
index 438baa1..f146fe0 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -431,22 +431,6 @@
return job;
}
-void block_job_yield(BlockJob *job)
-{
- assert(job->job.busy);
-
- /* Check cancellation *before* setting busy = false, too! */
- if (job_is_cancelled(&job->job)) {
- return;
- }
-
- if (!job_should_pause(&job->job)) {
- job_do_yield(&job->job, -1);
- }
-
- job_pause_point(&job->job);
-}
-
void block_job_iostatus_reset(BlockJob *job)
{
if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index 7df07b2..806ac64 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -108,14 +108,6 @@
void block_job_drain(Job *job);
/**
- * block_job_yield:
- * @job: The job that calls the function.
- *
- * Yield the block job coroutine.
- */
-void block_job_yield(BlockJob *job);
-
-/**
* block_job_ratelimit_get_delay:
*
* Calculate and return delay for the next request in ns. See the documentation
diff --git a/include/qemu/job.h b/include/qemu/job.h
index bbe1b0c..94900ec 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -339,6 +339,13 @@
/**
* @job: The job that calls the function.
+ *
+ * Yield the job coroutine.
+ */
+void job_yield(Job *job);
+
+/**
+ * @job: The job that calls the function.
* @ns: How many nanoseconds to stop for.
*
* Put the job to sleep (assuming that it wasn't canceled) for @ns
@@ -508,8 +515,6 @@
/* TODO To be removed from the public interface */
void job_state_transition(Job *job, JobStatus s1);
-void coroutine_fn job_do_yield(Job *job, uint64_t ns);
-bool job_should_pause(Job *job);
void job_do_dismiss(Job *job);
#endif
diff --git a/job.c b/job.c
index 2e453f6..eede680 100644
--- a/job.c
+++ b/job.c
@@ -226,7 +226,7 @@
return job->co;
}
-bool job_should_pause(Job *job)
+static bool job_should_pause(Job *job)
{
return job->pause_count > 0;
}
@@ -396,7 +396,7 @@
*
* If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
* called explicitly. */
-void coroutine_fn job_do_yield(Job *job, uint64_t ns)
+static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
job_lock();
if (ns != -1) {
@@ -441,6 +441,22 @@
}
}
+void job_yield(Job *job)
+{
+ assert(job->busy);
+
+ /* Check cancellation *before* setting busy = false, too! */
+ if (job_is_cancelled(job)) {
+ return;
+ }
+
+ if (!job_should_pause(job)) {
+ job_do_yield(job, -1);
+ }
+
+ job_pause_point(job);
+}
+
void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
assert(job->busy);
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index 34ee179..fce8366 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -47,7 +47,7 @@
if (s->use_timer) {
job_sleep_ns(&job->job, 0);
} else {
- block_job_yield(job);
+ job_yield(&job->job);
}
if (job_is_cancelled(&job->job)) {