Merge tag 'pull-target-arm-20230306' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
* allwinner-h3: Fix I2C controller model for Sun6i SoCs
* allwinner-h3: Add missing i2c controllers
* Expose M-profile system registers to gdbstub
* Expose pauth information to gdbstub
* Support direct boot for Linux/arm64 EFI zboot images
* Fix incorrect stage 2 MMU setup validation
# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmQGB+wZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3gdQEACVfgbs77mxbOb6u8yWHKGZ
# tVnQr9KZMv2lmwt5H3ROJPXznchrIIAwdMeRgKnbI+lC5jTq9L+Q8RJch3t/EbAd
# f0VMyiPe3DzCbCrAR9cW6EWzbYnEVo3Ioj4k7qjxK6u1BIKhXz99DLYd1KRdTxnx
# BAYmcl857Uir1q2FrBVMZ/ItCLbk4ejn+YaDIawNue2/s1oGa+we473x9rosCFvp
# L9bzT3R46e0o+Mfkn1OYRmgCmURTalWPpWAxyOUFR9YbrzXleLgAKEB3o3PPcvls
# u26uxztyRMqje1q06VjUzwaLw7zN9XPhmir+NXX7KXp2/x9PZjApOpPtt0kl+6qe
# FbByKfl24O9w/OKewsJw+udCBYdYrRPm6tWv2D71iAwjBUzBJgNGe5VPRdPFtPDn
# uSRO65o34w1nPzRpAheUciZueiabYrVmIgVltFxj0JlrKGfgiYHPLVyU0Uu0K/A7
# F2kUEQIzIcWdo+c8SlvlWOEA2ojVd/KoLVLgndqr40Tk5pbc65TRS08kkVVl4cMT
# jUGscl7Dyxe+yo8+nHdycAJpnKYDllJOh2JbGv3r2FqCy5FMuIqW4hHeuUxwpE+O
# nxm7lzjnaVHSAFHdzhk9x4E4uH/GTcdWzX1EsmpgGqe5oejLJOrCINb+Dj44+Y8h
# 8aGRvE7kxMs11upxc7BcAw==
# =KIMt
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 06 Mar 2023 15:34:04 GMT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# gpg: aka "Peter Maydell <peter@archaic.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* tag 'pull-target-arm-20230306' of https://git.linaro.org/people/pmaydell/qemu-arm: (21 commits)
hw: arm: allwinner-h3: Fix and complete H3 i2c devices
hw: allwinner-i2c: Fix TWI_CNTR_INT_FLAG on SUN6i SoCs
hw: arm: Support direct boot for Linux/arm64 EFI zboot images
target/arm: Rewrite check_s2_mmu_setup
target/arm: Diagnose incorrect usage of arm_is_secure subroutines
target/arm: Stub arm_hcr_el2_eff for m-profile
target/arm: Handle m-profile in arm_is_secure
target/arm: Implement gdbstub m-profile systemreg and secext
target/arm: Export arm_v7m_get_sp_ptr
target/arm: Export arm_v7m_mrs_control
target/arm: Implement gdbstub pauth extension
target/arm: Create pauth_ptr_mask
target/arm: Simplify iteration over bit widths
target/arm: Add name argument to output_vector_union_type
target/arm: Fix svep width in arm_gen_dynamic_svereg_xml
target/arm: Hoist pred_width in arm_gen_dynamic_svereg_xml
target/arm: Simplify register counting in arm_gen_dynamic_svereg_xml
target/arm: Split out output_vector_union_type
target/arm: Move arm_gen_dynamic_svereg_xml to gdbstub64.c
target/arm: Unexport arm_gen_dynamic_sysreg_xml
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 011fd85..da29661 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2490,6 +2490,7 @@
----------
Overall Audio backends
M: Gerd Hoffmann <kraxel@redhat.com>
+M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: audio/
X: audio/alsaaudio.c
@@ -2785,6 +2786,7 @@
Graphics
M: Gerd Hoffmann <kraxel@redhat.com>
+M: Marc-André Lureau <marcandre.lureau@redhat.com>
S: Odd Fixes
F: ui/
F: include/ui/
diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c
index 714bfb6..057571d 100644
--- a/audio/alsaaudio.c
+++ b/audio/alsaaudio.c
@@ -222,11 +222,7 @@
return -1;
}
- pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds));
- if (!pfds) {
- dolog ("Could not initialize poll mode\n");
- return -1;
- }
+ pfds = g_new0(struct pollfd, count);
err = snd_pcm_poll_descriptors (handle, pfds, count);
if (err < 0) {
@@ -917,28 +913,23 @@
alsa_init_per_direction(aopts->in);
alsa_init_per_direction(aopts->out);
- /*
- * need to define them, as otherwise alsa produces no sound
- * doesn't set has_* so alsa_open can identify it wasn't set by the user
- */
+ /* don't set has_* so alsa_open can identify it wasn't set by the user */
if (!dev->u.alsa.out->has_period_length) {
- /* 1024 frames assuming 44100Hz */
- dev->u.alsa.out->period_length = 1024 * 1000000 / 44100;
+ /* 256 frames assuming 44100Hz */
+ dev->u.alsa.out->period_length = 5805;
}
if (!dev->u.alsa.out->has_buffer_length) {
/* 4096 frames assuming 44100Hz */
- dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100;
+ dev->u.alsa.out->buffer_length = 92880;
}
- /*
- * OptsVisitor sets unspecified optional fields to zero, but do not depend
- * on it...
- */
if (!dev->u.alsa.in->has_period_length) {
- dev->u.alsa.in->period_length = 0;
+ /* 256 frames assuming 44100Hz */
+ dev->u.alsa.in->period_length = 5805;
}
if (!dev->u.alsa.in->has_buffer_length) {
- dev->u.alsa.in->buffer_length = 0;
+ /* 4096 frames assuming 44100Hz */
+ dev->u.alsa.in->buffer_length = 92880;
}
return dev;
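Aside (not part of the patch): the new constants are just the frames-to-microseconds conversion at the assumed 44100 Hz rate, rounded to the nearest microsecond:

    256 frames:  256  * 1000000 / 44100 ≈ 5804.99 us -> 5805
    4096 frames: 4096 * 1000000 / 44100 ≈ 92879.8 us -> 92880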
diff --git a/audio/audio.c b/audio/audio.c
index 4290309..70b0967 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -33,6 +33,7 @@
#include "qapi/qapi-visit-audio.h"
#include "qapi/qapi-commands-audio.h"
#include "qemu/cutils.h"
+#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/help_option.h"
#include "sysemu/sysemu.h"
@@ -148,26 +149,6 @@
}
}
-void *audio_calloc (const char *funcname, int nmemb, size_t size)
-{
- int cond;
- size_t len;
-
- len = nmemb * size;
- cond = !nmemb || !size;
- cond |= nmemb < 0;
- cond |= len < size;
-
- if (audio_bug ("audio_calloc", cond)) {
- AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n",
- funcname);
- AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len);
- return NULL;
- }
-
- return g_malloc0 (len);
-}
-
void AUD_vlog (const char *cap, const char *fmt, va_list ap)
{
if (cap) {
@@ -400,13 +381,6 @@
/*
* Capture
*/
-static void noop_conv (struct st_sample *dst, const void *src, int samples)
-{
- (void) src;
- (void) dst;
- (void) samples;
-}
-
static CaptureVoiceOut *audio_pcm_capture_find_specific(AudioState *s,
struct audsettings *as)
{
@@ -504,15 +478,8 @@
sw->info = hw->info;
sw->empty = 1;
sw->active = hw->enabled;
- sw->conv = noop_conv;
- sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq;
sw->vol = nominal_volume;
sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq);
- if (!sw->rate) {
- dolog ("Could not start rate conversion for `%s'\n", SW_NAME (sw));
- g_free (sw);
- return -1;
- }
QLIST_INSERT_HEAD (&hw_cap->sw_head, sw, entries);
QLIST_INSERT_HEAD (&hw->cap_head, sc, entries);
#ifdef DEBUG_CAPTURE
@@ -547,8 +514,8 @@
static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw)
{
size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw);
- if (audio_bug(__func__, live > hw->conv_buf->size)) {
- dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
+ if (audio_bug(__func__, live > hw->conv_buf.size)) {
+ dolog("live=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size);
return 0;
}
return live;
@@ -557,13 +524,13 @@
static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples)
{
size_t conv = 0;
- STSampleBuffer *conv_buf = hw->conv_buf;
+ STSampleBuffer *conv_buf = &hw->conv_buf;
while (samples) {
uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame);
size_t proc = MIN(samples, conv_buf->size - conv_buf->pos);
- hw->conv(conv_buf->samples + conv_buf->pos, src, proc);
+ hw->conv(conv_buf->buffer + conv_buf->pos, src, proc);
conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size;
samples -= proc;
conv += proc;
@@ -575,56 +542,65 @@
/*
* Soft voice (capture)
*/
-static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size)
+static void audio_pcm_sw_resample_in(SWVoiceIn *sw,
+ size_t frames_in_max, size_t frames_out_max,
+ size_t *total_in, size_t *total_out)
{
HWVoiceIn *hw = sw->hw;
- size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0;
- struct st_sample *src, *dst = sw->buf;
+ struct st_sample *src, *dst;
+ size_t live, rpos, frames_in, frames_out;
+
+ live = hw->total_samples_captured - sw->total_hw_samples_acquired;
+ rpos = audio_ring_posb(hw->conv_buf.pos, live, hw->conv_buf.size);
+
+ /* resample conv_buf from rpos to end of buffer */
+ src = hw->conv_buf.buffer + rpos;
+ frames_in = MIN(frames_in_max, hw->conv_buf.size - rpos);
+ dst = sw->resample_buf.buffer;
+ frames_out = frames_out_max;
+ st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out);
+ rpos += frames_in;
+ *total_in = frames_in;
+ *total_out = frames_out;
+
+ /* resample conv_buf from start of buffer if there are input frames left */
+ if (frames_in_max - frames_in && rpos == hw->conv_buf.size) {
+ src = hw->conv_buf.buffer;
+ frames_in = frames_in_max - frames_in;
+ dst += frames_out;
+ frames_out = frames_out_max - frames_out;
+ st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out);
+ *total_in += frames_in;
+ *total_out += frames_out;
+ }
+}
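A hypothetical trace of the two-pass split above (reading audio_ring_posb() as "live frames behind pos in the ring"): with conv_buf.size = 1024, conv_buf.pos = 100 and live = 300, rpos starts at (100 - 300) mod 1024 = 824; the first st_rate_flow() call can consume at most the 200 frames up to the end of the buffer, rpos reaches 1024, and the second call consumes the remaining 100 frames from index 0.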
+
+static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t buf_len)
+{
+ HWVoiceIn *hw = sw->hw;
+ size_t live, frames_out_max, total_in, total_out;
live = hw->total_samples_captured - sw->total_hw_samples_acquired;
if (!live) {
return 0;
}
- if (audio_bug(__func__, live > hw->conv_buf->size)) {
- dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size);
+ if (audio_bug(__func__, live > hw->conv_buf.size)) {
+ dolog("live_in=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size);
return 0;
}
- rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size);
+ frames_out_max = MIN(buf_len / sw->info.bytes_per_frame,
+ sw->resample_buf.size);
- samples = size / sw->info.bytes_per_frame;
-
- swlim = (live * sw->ratio) >> 32;
- swlim = MIN (swlim, samples);
-
- while (swlim) {
- src = hw->conv_buf->samples + rpos;
- if (hw->conv_buf->pos > rpos) {
- isamp = hw->conv_buf->pos - rpos;
- } else {
- isamp = hw->conv_buf->size - rpos;
- }
-
- if (!isamp) {
- break;
- }
- osamp = swlim;
-
- st_rate_flow (sw->rate, src, dst, &isamp, &osamp);
- swlim -= osamp;
- rpos = (rpos + isamp) % hw->conv_buf->size;
- dst += osamp;
- ret += osamp;
- total += isamp;
- }
+ audio_pcm_sw_resample_in(sw, live, frames_out_max, &total_in, &total_out);
if (!hw->pcm_ops->volume_in) {
- mixeng_volume (sw->buf, ret, &sw->vol);
+ mixeng_volume(sw->resample_buf.buffer, total_out, &sw->vol);
}
+ sw->clip(buf, sw->resample_buf.buffer, total_out);
- sw->clip (buf, sw->buf, ret);
- sw->total_hw_samples_acquired += total;
- return ret * sw->info.bytes_per_frame;
+ sw->total_hw_samples_acquired += total_in;
+ return total_out * sw->info.bytes_per_frame;
}
/*
@@ -660,8 +636,8 @@
if (nb_live1) {
size_t live = smin;
- if (audio_bug(__func__, live > hw->mix_buf->size)) {
- dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
+ if (audio_bug(__func__, live > hw->mix_buf.size)) {
+ dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
return 0;
}
return live;
@@ -678,17 +654,17 @@
static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len)
{
size_t clipped = 0;
- size_t pos = hw->mix_buf->pos;
+ size_t pos = hw->mix_buf.pos;
while (len) {
- st_sample *src = hw->mix_buf->samples + pos;
+ st_sample *src = hw->mix_buf.buffer + pos;
uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame);
- size_t samples_till_end_of_buf = hw->mix_buf->size - pos;
+ size_t samples_till_end_of_buf = hw->mix_buf.size - pos;
size_t samples_to_clip = MIN(len, samples_till_end_of_buf);
hw->clip(dst, src, samples_to_clip);
- pos = (pos + samples_to_clip) % hw->mix_buf->size;
+ pos = (pos + samples_to_clip) % hw->mix_buf.size;
len -= samples_to_clip;
clipped += samples_to_clip;
}
@@ -697,84 +673,113 @@
/*
* Soft voice (playback)
*/
-static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size)
+static void audio_pcm_sw_resample_out(SWVoiceOut *sw,
+ size_t frames_in_max, size_t frames_out_max,
+ size_t *total_in, size_t *total_out)
{
- size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck;
- size_t hw_free;
- size_t ret = 0, pos = 0, total = 0;
-
- if (!sw) {
- return size;
- }
-
- hwsamples = sw->hw->mix_buf->size;
+ HWVoiceOut *hw = sw->hw;
+ struct st_sample *src, *dst;
+ size_t live, wpos, frames_in, frames_out;
live = sw->total_hw_samples_mixed;
- if (audio_bug(__func__, live > hwsamples)) {
- dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples);
+ wpos = (hw->mix_buf.pos + live) % hw->mix_buf.size;
+
+ /* write to mix_buf from wpos to end of buffer */
+ src = sw->resample_buf.buffer;
+ frames_in = frames_in_max;
+ dst = hw->mix_buf.buffer + wpos;
+ frames_out = MIN(frames_out_max, hw->mix_buf.size - wpos);
+ st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out);
+ wpos += frames_out;
+ *total_in = frames_in;
+ *total_out = frames_out;
+
+ /* write to mix_buf from start of buffer if there are input frames left */
+ if (frames_in_max - frames_in > 0 && wpos == hw->mix_buf.size) {
+ src += frames_in;
+ frames_in = frames_in_max - frames_in;
+ dst = hw->mix_buf.buffer;
+ frames_out = frames_out_max - frames_out;
+ st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out);
+ *total_in += frames_in;
+ *total_out += frames_out;
+ }
+}
+
+static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t buf_len)
+{
+ HWVoiceOut *hw = sw->hw;
+ size_t live, dead, hw_free, sw_max, fe_max;
+ size_t frames_in_max, frames_out_max, total_in, total_out;
+
+ live = sw->total_hw_samples_mixed;
+ if (audio_bug(__func__, live > hw->mix_buf.size)) {
+ dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
return 0;
}
- if (live == hwsamples) {
+ if (live == hw->mix_buf.size) {
#ifdef DEBUG_OUT
dolog ("%s is full %zu\n", sw->name, live);
#endif
return 0;
}
- wpos = (sw->hw->mix_buf->pos + live) % hwsamples;
-
- dead = hwsamples - live;
- hw_free = audio_pcm_hw_get_free(sw->hw);
+ dead = hw->mix_buf.size - live;
+ hw_free = audio_pcm_hw_get_free(hw);
hw_free = hw_free > live ? hw_free - live : 0;
- samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio;
- samples = MIN(samples, size / sw->info.bytes_per_frame);
- if (samples) {
- sw->conv(sw->buf, buf, samples);
+ frames_out_max = MIN(dead, hw_free);
+ sw_max = st_rate_frames_in(sw->rate, frames_out_max);
+ fe_max = MIN(buf_len / sw->info.bytes_per_frame + sw->resample_buf.pos,
+ sw->resample_buf.size);
+ frames_in_max = MIN(sw_max, fe_max);
+ if (!frames_in_max) {
+ return 0;
+ }
+
+ if (frames_in_max > sw->resample_buf.pos) {
+ sw->conv(sw->resample_buf.buffer + sw->resample_buf.pos,
+ buf, frames_in_max - sw->resample_buf.pos);
if (!sw->hw->pcm_ops->volume_out) {
- mixeng_volume(sw->buf, samples, &sw->vol);
+ mixeng_volume(sw->resample_buf.buffer + sw->resample_buf.pos,
+ frames_in_max - sw->resample_buf.pos, &sw->vol);
}
}
- while (samples) {
- dead = hwsamples - live;
- left = hwsamples - wpos;
- blck = MIN (dead, left);
- if (!blck) {
- break;
- }
- isamp = samples;
- osamp = blck;
- st_rate_flow_mix (
- sw->rate,
- sw->buf + pos,
- sw->hw->mix_buf->samples + wpos,
- &isamp,
- &osamp
- );
- ret += isamp;
- samples -= isamp;
- pos += isamp;
- live += osamp;
- wpos = (wpos + osamp) % hwsamples;
- total += osamp;
- }
+ audio_pcm_sw_resample_out(sw, frames_in_max, frames_out_max,
+ &total_in, &total_out);
- sw->total_hw_samples_mixed += total;
+ sw->total_hw_samples_mixed += total_out;
sw->empty = sw->total_hw_samples_mixed == 0;
+ /*
+ * Upsampling may leave one audio frame in the resample buffer. Decrement
+ * total_in by one if there was a leftover frame from the previous resample
+ * pass in the resample buffer. Increment total_in by one if the current
+ * resample pass left one frame in the resample buffer.
+ */
+ if (frames_in_max - total_in == 1) {
+ /* copy one leftover audio frame to the beginning of the buffer */
+ *sw->resample_buf.buffer = *(sw->resample_buf.buffer + total_in);
+ total_in += 1 - sw->resample_buf.pos;
+ sw->resample_buf.pos = 1;
+ } else if (total_in >= sw->resample_buf.pos) {
+ total_in -= sw->resample_buf.pos;
+ sw->resample_buf.pos = 0;
+ }
+
#ifdef DEBUG_OUT
dolog (
- "%s: write size %zu ret %zu total sw %zu\n",
- SW_NAME (sw),
- size / sw->info.bytes_per_frame,
- ret,
+ "%s: write size %zu written %zu total mixed %zu\n",
+ SW_NAME(sw),
+ buf_len / sw->info.bytes_per_frame,
+ total_in,
sw->total_hw_samples_mixed
);
#endif
- return ret * sw->info.bytes_per_frame;
+ return total_in * sw->info.bytes_per_frame;
}
#ifdef DEBUG_AUDIO
@@ -992,18 +997,6 @@
}
}
-/**
- * audio_frontend_frames_in() - returns the number of frames the resampling
- * code generates from frames_in frames
- *
- * @sw: audio recording frontend
- * @frames_in: number of frames
- */
-static size_t audio_frontend_frames_in(SWVoiceIn *sw, size_t frames_in)
-{
- return (int64_t)frames_in * sw->ratio >> 32;
-}
-
static size_t audio_get_avail (SWVoiceIn *sw)
{
size_t live;
@@ -1013,33 +1006,21 @@
}
live = sw->hw->total_samples_captured - sw->total_hw_samples_acquired;
- if (audio_bug(__func__, live > sw->hw->conv_buf->size)) {
- dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live,
- sw->hw->conv_buf->size);
+ if (audio_bug(__func__, live > sw->hw->conv_buf.size)) {
+ dolog("live=%zu sw->hw->conv_buf.size=%zu\n", live,
+ sw->hw->conv_buf.size);
return 0;
}
ldebug (
- "%s: get_avail live %zu frontend frames %zu\n",
+ "%s: get_avail live %zu frontend frames %u\n",
SW_NAME (sw),
- live, audio_frontend_frames_in(sw, live)
+ live, st_rate_frames_out(sw->rate, live)
);
return live;
}
-/**
- * audio_frontend_frames_out() - returns the number of frames needed to
- * get frames_out frames after resampling
- *
- * @sw: audio playback frontend
- * @frames_out: number of frames
- */
-static size_t audio_frontend_frames_out(SWVoiceOut *sw, size_t frames_out)
-{
- return ((int64_t)frames_out << 32) / sw->ratio;
-}
-
static size_t audio_get_free(SWVoiceOut *sw)
{
size_t live, dead;
@@ -1050,17 +1031,17 @@
live = sw->total_hw_samples_mixed;
- if (audio_bug(__func__, live > sw->hw->mix_buf->size)) {
- dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live,
- sw->hw->mix_buf->size);
+ if (audio_bug(__func__, live > sw->hw->mix_buf.size)) {
+ dolog("live=%zu sw->hw->mix_buf.size=%zu\n", live,
+ sw->hw->mix_buf.size);
return 0;
}
- dead = sw->hw->mix_buf->size - live;
+ dead = sw->hw->mix_buf.size - live;
#ifdef DEBUG_OUT
- dolog("%s: get_free live %zu dead %zu frontend frames %zu\n",
- SW_NAME(sw), live, dead, audio_frontend_frames_out(sw, dead));
+ dolog("%s: get_free live %zu dead %zu frontend frames %u\n",
+ SW_NAME(sw), live, dead, st_rate_frames_in(sw->rate, dead));
#endif
return dead;
@@ -1076,32 +1057,40 @@
for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) {
SWVoiceOut *sw = &sc->sw;
- int rpos2 = rpos;
+ size_t rpos2 = rpos;
n = samples;
while (n) {
- size_t till_end_of_hw = hw->mix_buf->size - rpos2;
- size_t to_write = MIN(till_end_of_hw, n);
- size_t bytes = to_write * hw->info.bytes_per_frame;
- size_t written;
+ size_t till_end_of_hw = hw->mix_buf.size - rpos2;
+ size_t to_read = MIN(till_end_of_hw, n);
+ size_t live, frames_in, frames_out;
- sw->buf = hw->mix_buf->samples + rpos2;
- written = audio_pcm_sw_write (sw, NULL, bytes);
- if (written - bytes) {
- dolog("Could not mix %zu bytes into a capture "
+ sw->resample_buf.buffer = hw->mix_buf.buffer + rpos2;
+ sw->resample_buf.size = to_read;
+ live = sw->total_hw_samples_mixed;
+
+ audio_pcm_sw_resample_out(sw,
+ to_read, sw->hw->mix_buf.size - live,
+ &frames_in, &frames_out);
+
+ sw->total_hw_samples_mixed += frames_out;
+ sw->empty = sw->total_hw_samples_mixed == 0;
+
+ if (to_read - frames_in) {
+ dolog("Could not mix %zu frames into a capture "
"buffer, mixed %zu\n",
- bytes, written);
+ to_read, frames_in);
break;
}
- n -= to_write;
- rpos2 = (rpos2 + to_write) % hw->mix_buf->size;
+ n -= to_read;
+ rpos2 = (rpos2 + to_read) % hw->mix_buf.size;
}
}
}
- n = MIN(samples, hw->mix_buf->size - rpos);
- mixeng_clear(hw->mix_buf->samples + rpos, n);
- mixeng_clear(hw->mix_buf->samples, samples - n);
+ n = MIN(samples, hw->mix_buf.size - rpos);
+ mixeng_clear(hw->mix_buf.buffer + rpos, n);
+ mixeng_clear(hw->mix_buf.buffer, samples - n);
}
static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live)
@@ -1127,7 +1116,7 @@
live -= proc;
clipped += proc;
- hw->mix_buf->pos = (hw->mix_buf->pos + proc) % hw->mix_buf->size;
+ hw->mix_buf.pos = (hw->mix_buf.pos + proc) % hw->mix_buf.size;
if (proc == 0 || proc < decr) {
break;
@@ -1181,12 +1170,14 @@
size_t free;
if (hw_free > sw->total_hw_samples_mixed) {
- free = audio_frontend_frames_out(sw,
+ free = st_rate_frames_in(sw->rate,
MIN(sw_free, hw_free - sw->total_hw_samples_mixed));
} else {
free = 0;
}
- if (free > 0) {
+ if (free > sw->resample_buf.pos) {
+ free = MIN(free, sw->resample_buf.size)
+ - sw->resample_buf.pos;
sw->callback.fn(sw->callback.opaque,
free * sw->info.bytes_per_frame);
}
@@ -1198,8 +1189,8 @@
live = 0;
}
- if (audio_bug(__func__, live > hw->mix_buf->size)) {
- dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size);
+ if (audio_bug(__func__, live > hw->mix_buf.size)) {
+ dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size);
continue;
}
@@ -1227,13 +1218,13 @@
continue;
}
- prev_rpos = hw->mix_buf->pos;
+ prev_rpos = hw->mix_buf.pos;
played = audio_pcm_hw_run_out(hw, live);
replay_audio_out(&played);
- if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) {
- dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n",
- hw->mix_buf->pos, hw->mix_buf->size, played);
- hw->mix_buf->pos = 0;
+ if (audio_bug(__func__, hw->mix_buf.pos >= hw->mix_buf.size)) {
+ dolog("hw->mix_buf.pos=%zu hw->mix_buf.size=%zu played=%zu\n",
+ hw->mix_buf.pos, hw->mix_buf.size, played);
+ hw->mix_buf.pos = 0;
}
#ifdef DEBUG_OUT
@@ -1314,10 +1305,10 @@
if (replay_mode != REPLAY_MODE_PLAY) {
captured = audio_pcm_hw_run_in(
- hw, hw->conv_buf->size - audio_pcm_hw_get_live_in(hw));
+ hw, hw->conv_buf.size - audio_pcm_hw_get_live_in(hw));
}
- replay_audio_in(&captured, hw->conv_buf->samples, &hw->conv_buf->pos,
- hw->conv_buf->size);
+ replay_audio_in(&captured, hw->conv_buf.buffer, &hw->conv_buf.pos,
+ hw->conv_buf.size);
min = audio_pcm_hw_find_min_in (hw);
hw->total_samples_captured += captured - min;
@@ -1330,8 +1321,9 @@
size_t sw_avail = audio_get_avail(sw);
size_t avail;
- avail = audio_frontend_frames_in(sw, sw_avail);
+ avail = st_rate_frames_out(sw->rate, sw_avail);
if (avail > 0) {
+ avail = MIN(avail, sw->resample_buf.size);
sw->callback.fn(sw->callback.opaque,
avail * sw->info.bytes_per_frame);
}
@@ -1350,14 +1342,14 @@
SWVoiceOut *sw;
captured = live = audio_pcm_hw_get_live_out (hw, NULL);
- rpos = hw->mix_buf->pos;
+ rpos = hw->mix_buf.pos;
while (live) {
- size_t left = hw->mix_buf->size - rpos;
+ size_t left = hw->mix_buf.size - rpos;
size_t to_capture = MIN(live, left);
struct st_sample *src;
struct capture_callback *cb;
- src = hw->mix_buf->samples + rpos;
+ src = hw->mix_buf.buffer + rpos;
hw->clip (cap->buf, src, to_capture);
mixeng_clear (src, to_capture);
@@ -1365,10 +1357,10 @@
cb->ops.capture (cb->opaque, cap->buf,
to_capture * hw->info.bytes_per_frame);
}
- rpos = (rpos + to_capture) % hw->mix_buf->size;
+ rpos = (rpos + to_capture) % hw->mix_buf.size;
live -= to_capture;
}
- hw->mix_buf->pos = rpos;
+ hw->mix_buf.pos = rpos;
for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) {
if (!sw->active && sw->empty) {
@@ -1927,7 +1919,7 @@
audio_pcm_init_info (&hw->info, as);
- cap->buf = g_malloc0_n(hw->mix_buf->size, hw->info.bytes_per_frame);
+ cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame);
if (hw->info.is_float) {
hw->clip = mixeng_clip_float[hw->info.nchannels == 2];
@@ -1979,7 +1971,7 @@
sw = sw1;
}
QLIST_REMOVE (cap, entries);
- g_free (cap->hw.mix_buf);
+ g_free(cap->hw.mix_buf.buffer);
g_free (cap->buf);
g_free (cap);
}
diff --git a/audio/audio_int.h b/audio/audio_int.h
index e87ce01..d51d63f 100644
--- a/audio/audio_int.h
+++ b/audio/audio_int.h
@@ -58,7 +58,7 @@
typedef struct STSampleBuffer {
size_t pos, size;
- st_sample samples[];
+ st_sample *buffer;
} STSampleBuffer;
typedef struct HWVoiceOut {
@@ -71,7 +71,7 @@
f_sample *clip;
uint64_t ts_helper;
- STSampleBuffer *mix_buf;
+ STSampleBuffer mix_buf;
void *buf_emul;
size_t pos_emul, pending_emul, size_emul;
@@ -93,7 +93,7 @@
size_t total_samples_captured;
uint64_t ts_helper;
- STSampleBuffer *conv_buf;
+ STSampleBuffer conv_buf;
void *buf_emul;
size_t pos_emul, pending_emul, size_emul;
@@ -108,8 +108,7 @@
AudioState *s;
struct audio_pcm_info info;
t_sample *conv;
- int64_t ratio;
- struct st_sample *buf;
+ STSampleBuffer resample_buf;
void *rate;
size_t total_hw_samples_mixed;
int active;
@@ -126,10 +125,9 @@
AudioState *s;
int active;
struct audio_pcm_info info;
- int64_t ratio;
void *rate;
size_t total_hw_samples_acquired;
- struct st_sample *buf;
+ STSampleBuffer resample_buf;
f_sample *clip;
HWVoiceIn *hw;
char *name;
@@ -151,8 +149,8 @@
int can_be_default;
int max_voices_out;
int max_voices_in;
- int voice_size_out;
- int voice_size_in;
+ size_t voice_size_out;
+ size_t voice_size_in;
QLIST_ENTRY(audio_driver) next;
};
@@ -251,7 +249,6 @@
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
int audio_bug (const char *funcname, int cond);
-void *audio_calloc (const char *funcname, int nmemb, size_t size);
void audio_run(AudioState *s, const char *msg);
@@ -294,9 +291,6 @@
#define ldebug(fmt, ...) (void)0
#endif
-#define AUDIO_STRINGIFY_(n) #n
-#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
-
typedef struct AudiodevListEntry {
Audiodev *dev;
QSIMPLEQ_ENTRY(AudiodevListEntry) next;
diff --git a/audio/audio_template.h b/audio/audio_template.h
index 42b4712..e42326c 100644
--- a/audio/audio_template.h
+++ b/audio/audio_template.h
@@ -40,7 +40,7 @@
struct audio_driver *drv)
{
int max_voices = glue (drv->max_voices_, TYPE);
- int voice_size = glue (drv->voice_size_, TYPE);
+ size_t voice_size = glue(drv->voice_size_, TYPE);
if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
if (!max_voices) {
@@ -63,16 +63,17 @@
}
if (audio_bug(__func__, voice_size && !max_voices)) {
- dolog ("drv=`%s' voice_size=%d max_voices=0\n",
- drv->name, voice_size);
+ dolog("drv=`%s' voice_size=%zu max_voices=0\n",
+ drv->name, voice_size);
}
}
static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw)
{
g_free(hw->buf_emul);
- g_free (HWBUF);
- HWBUF = NULL;
+ g_free(HWBUF.buffer);
+ HWBUF.buffer = NULL;
+ HWBUF.size = 0;
}
static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw)
@@ -83,56 +84,67 @@
dolog("Attempted to allocate empty buffer\n");
}
- HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples);
- HWBUF->size = samples;
+ HWBUF.buffer = g_new0(st_sample, samples);
+ HWBUF.size = samples;
+ HWBUF.pos = 0;
} else {
- HWBUF = NULL;
+ HWBUF.buffer = NULL;
+ HWBUF.size = 0;
}
}
static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw)
{
- g_free (sw->buf);
+ g_free(sw->resample_buf.buffer);
+ sw->resample_buf.buffer = NULL;
+ sw->resample_buf.size = 0;
if (sw->rate) {
st_rate_stop (sw->rate);
}
-
- sw->buf = NULL;
sw->rate = NULL;
}
static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw)
{
- int samples;
+ HW *hw = sw->hw;
+ uint64_t samples;
if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) {
return 0;
}
-#ifdef DAC
- samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio;
-#else
- samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32;
-#endif
+ samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq);
+ if (samples == 0) {
+ uint64_t f_fe_min;
+ uint64_t f_be = (uint32_t)hw->info.freq;
- sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample));
- if (!sw->buf) {
- dolog ("Could not allocate buffer for `%s' (%d samples)\n",
- SW_NAME (sw), samples);
+ /* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */
+ f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size;
+ qemu_log_mask(LOG_UNIMP,
+ AUDIO_CAP ": The guest selected a " NAME " sample rate"
+ " of %d Hz for %s. Only sample rates >= %" PRIu64 " Hz"
+ " are supported.\n",
+ sw->info.freq, sw->name, f_fe_min);
return -1;
}
+ /*
+ * Allocate one additional audio frame that is needed for upsampling
+ * if the resample buffer size is small. For large buffer sizes take
+ * care of overflows and truncation.
+ */
+ samples = samples < SIZE_MAX ? samples + 1 : SIZE_MAX;
+ sw->resample_buf.buffer = g_new0(st_sample, samples);
+ sw->resample_buf.size = samples;
+ sw->resample_buf.pos = 0;
+
#ifdef DAC
- sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq);
+ sw->rate = st_rate_start(sw->info.freq, hw->info.freq);
#else
- sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq);
+ sw->rate = st_rate_start(hw->info.freq, sw->info.freq);
#endif
- if (!sw->rate) {
- g_free (sw->buf);
- sw->buf = NULL;
- return -1;
- }
+
return 0;
}
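For instance, with hypothetical numbers: a 44100 Hz backend and a 4096-frame HWBUF make muldiv64() return 0 for any frontend rate below ceil(44100 / 4096) = 11 Hz, so such rates are now rejected with the LOG_UNIMP message instead of ending up with an empty resample buffer.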
@@ -149,11 +161,8 @@
sw->hw = hw;
sw->active = 0;
#ifdef DAC
- sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq;
sw->total_hw_samples_mixed = 0;
sw->empty = 1;
-#else
- sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq;
#endif
if (sw->info.is_float) {
@@ -264,13 +273,11 @@
return NULL;
}
- hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE));
- if (!hw) {
- dolog ("Can not allocate voice `%s' size %d\n",
- drv->name, glue (drv->voice_size_, TYPE));
- return NULL;
- }
-
+ /*
+ * Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE)
+ * is guaranteed to be != 0. See the audio_init_nb_voices_* functions.
+ */
+ hw = g_malloc0(glue(drv->voice_size_, TYPE));
hw->s = s;
hw->pcm_ops = drv->pcm_ops;
@@ -418,33 +425,28 @@
hw_as = *as;
}
- sw = audio_calloc(__func__, 1, sizeof(*sw));
- if (!sw) {
- dolog ("Could not allocate soft voice `%s' (%zu bytes)\n",
- sw_name ? sw_name : "unknown", sizeof (*sw));
- goto err1;
- }
+ sw = g_new0(SW, 1);
sw->s = s;
hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as);
if (!hw) {
- goto err2;
+ dolog("Could not create a backend for voice `%s'\n", sw_name);
+ goto err1;
}
glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw);
if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) {
- goto err3;
+ goto err2;
}
return sw;
-err3:
+err2:
glue (audio_pcm_hw_del_sw_, TYPE) (sw);
glue (audio_pcm_hw_gc_, TYPE) (&hw);
-err2:
- g_free (sw);
err1:
+ g_free(sw);
return NULL;
}
@@ -515,8 +517,8 @@
HW *hw = sw->hw;
if (!hw) {
- dolog ("Internal logic error voice `%s' has no hardware store\n",
- SW_NAME (sw));
+ dolog("Internal logic error: voice `%s' has no backend\n",
+ SW_NAME(sw));
goto fail;
}
@@ -527,7 +529,6 @@
} else {
sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as);
if (!sw) {
- dolog ("Failed to create voice `%s'\n", name);
return NULL;
}
}
diff --git a/audio/mixeng.c b/audio/mixeng.c
index 100a306..69f6549 100644
--- a/audio/mixeng.c
+++ b/audio/mixeng.c
@@ -414,12 +414,7 @@
*/
void *st_rate_start (int inrate, int outrate)
{
- struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate));
-
- if (!rate) {
- dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate));
- return NULL;
- }
+ struct rate *rate = g_new0(struct rate, 1);
rate->opos = 0;
@@ -445,6 +440,86 @@
g_free (opaque);
}
+/**
+ * st_rate_frames_out() - returns the number of frames the resampling code
+ * generates from frames_in frames
+ *
+ * @opaque: pointer to struct rate
+ * @frames_in: number of frames
+ *
+ * When upsampling, there may be more than one correct result. In this case,
+ * the function returns the maximum number of output frames the resampling
+ * code can generate.
+ */
+uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in)
+{
+ struct rate *rate = opaque;
+ uint64_t opos_end, opos_delta;
+ uint32_t ipos_end;
+ uint32_t frames_out;
+
+ if (rate->opos_inc == 1ULL << 32) {
+ return frames_in;
+ }
+
+ /* no output frame without at least one input frame */
+ if (!frames_in) {
+ return 0;
+ }
+
+ /* last frame read was at rate->ipos - 1 */
+ ipos_end = rate->ipos - 1 + frames_in;
+ opos_end = (uint64_t)ipos_end << 32;
+
+ /* last frame written was at rate->opos - rate->opos_inc */
+ if (opos_end + rate->opos_inc <= rate->opos) {
+ return 0;
+ }
+ opos_delta = opos_end - rate->opos + rate->opos_inc;
+ frames_out = opos_delta / rate->opos_inc;
+
+ return opos_delta % rate->opos_inc ? frames_out : frames_out - 1;
+}
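A minimal sketch of the intended calling pattern (helper name and buffers are hypothetical, not part of the patch), mirroring how audio_pcm_sw_read() sizes its work before draining frames through st_rate_flow():

    /* Sketch only: size the output with st_rate_frames_out() before resampling. */
    static size_t resample_once(void *rate, st_sample *in_buf, size_t in_frames,
                                st_sample *out_buf, size_t out_cap)
    {
        size_t isamp = in_frames;
        size_t osamp = MIN(out_cap, st_rate_frames_out(rate, in_frames));

        st_rate_flow(rate, in_buf, out_buf, &isamp, &osamp);
        return osamp; /* frames actually written to out_buf */
    }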
+
+/**
+ * st_rate_frames_in() - returns the number of frames needed to
+ * get frames_out frames after resampling
+ *
+ * @opaque: pointer to struct rate
+ * @frames_out: number of frames
+ *
+ * When downsampling, there may be more than one correct result. In this
+ * case, the function returns the maximum number of input frames needed.
+ */
+uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out)
+{
+ struct rate *rate = opaque;
+ uint64_t opos_start, opos_end;
+ uint32_t ipos_start, ipos_end;
+
+ if (rate->opos_inc == 1ULL << 32) {
+ return frames_out;
+ }
+
+ if (frames_out) {
+ opos_start = rate->opos;
+ ipos_start = rate->ipos;
+ } else {
+ uint64_t offset;
+
+ /* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */
+ offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1);
+ opos_start = rate->opos + offset;
+ ipos_start = rate->ipos + (offset >> 32);
+ }
+ /* last frame written was at opos_start - rate->opos_inc */
+ opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out;
+ ipos_end = (opos_end >> 32) + 1;
+
+ /* last frame read was at ipos_start - 1 */
+ return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0;
+}
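As a quick numeric check (hypothetical rates, fresh resampler state): for an exact 2:1 downsampler such as st_rate_start(88200, 44100), opos_inc is exactly 2 in 32.32 fixed point, so st_rate_frames_out(rate, M) evaluates to M / 2 rounded down and st_rate_frames_in(rate, N) to 2 * N.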
+
void mixeng_clear (struct st_sample *buf, int len)
{
memset (buf, 0, len * sizeof (struct st_sample));
diff --git a/audio/mixeng.h b/audio/mixeng.h
index 2dcd6df..f9de7cf 100644
--- a/audio/mixeng.h
+++ b/audio/mixeng.h
@@ -52,6 +52,8 @@
void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf,
size_t *isamp, size_t *osamp);
void st_rate_stop (void *opaque);
+uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in);
+uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out);
void mixeng_clear (struct st_sample *buf, int len);
void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol);
diff --git a/audio/rate_template.h b/audio/rate_template.h
index b432719..6648f0d 100644
--- a/audio/rate_template.h
+++ b/audio/rate_template.h
@@ -40,8 +40,6 @@
int64_t t;
#endif
- ilast = rate->ilast;
-
istart = ibuf;
iend = ibuf + *isamp;
@@ -59,15 +57,17 @@
return;
}
- while (obuf < oend) {
+ /* without input samples, there's nothing to do */
+ if (ibuf >= iend) {
+ *osamp = 0;
+ return;
+ }
- /* Safety catch to make sure we have input samples. */
- if (ibuf >= iend) {
- break;
- }
+ ilast = rate->ilast;
+
+ while (true) {
/* read as many input samples so that ipos > opos */
-
while (rate->ipos <= (rate->opos >> 32)) {
ilast = *ibuf++;
rate->ipos++;
@@ -78,6 +78,11 @@
}
}
+ /* make sure that the next output sample can be written */
+ if (obuf >= oend) {
+ break;
+ }
+
icur = *ibuf;
/* wrap ipos and opos around long before they overflow */
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 2f16011..4bf15f9 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -422,6 +422,7 @@
}
pc_xen_hvm_init_pci(machine);
+ xen_igd_reserve_slot(pcms->bus);
pci_create_simple(pcms->bus, -1, "xen-platform");
}
#endif
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index f25cc2c..49c1210 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -238,6 +238,8 @@
[NVME_TIMESTAMP] = true,
[NVME_HOST_BEHAVIOR_SUPPORT] = true,
[NVME_COMMAND_SET_PROFILE] = true,
+ [NVME_FDP_MODE] = true,
+ [NVME_FDP_EVENTS] = true,
};
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
@@ -249,6 +251,8 @@
[NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE,
[NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE,
[NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE,
+ [NVME_FDP_MODE] = NVME_FEAT_CAP_CHANGE,
+ [NVME_FDP_EVENTS] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
};
static const uint32_t nvme_cse_acs[256] = {
@@ -266,6 +270,8 @@
[NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP,
[NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+ [NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP,
+ [NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_none[256];
@@ -279,6 +285,8 @@
[NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP,
[NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
[NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP,
+ [NVME_CMD_IO_MGMT_RECV] = NVME_CMD_EFF_CSUPP,
+ [NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
};
static const uint32_t nvme_cse_iocs_zoned[256] = {
@@ -297,12 +305,66 @@
static void nvme_process_sq(void *opaque);
static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst);
+static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n);
static uint16_t nvme_sqid(NvmeRequest *req)
{
return le16_to_cpu(req->sq->sqid);
}
+static inline uint16_t nvme_make_pid(NvmeNamespace *ns, uint16_t rg,
+ uint16_t ph)
+{
+ uint16_t rgif = ns->endgrp->fdp.rgif;
+
+ if (!rgif) {
+ return ph;
+ }
+
+ return (rg << (16 - rgif)) | ph;
+}
+
+static inline bool nvme_ph_valid(NvmeNamespace *ns, uint16_t ph)
+{
+ return ph < ns->fdp.nphs;
+}
+
+static inline bool nvme_rg_valid(NvmeEnduranceGroup *endgrp, uint16_t rg)
+{
+ return rg < endgrp->fdp.nrg;
+}
+
+static inline uint16_t nvme_pid2ph(NvmeNamespace *ns, uint16_t pid)
+{
+ uint16_t rgif = ns->endgrp->fdp.rgif;
+
+ if (!rgif) {
+ return pid;
+ }
+
+ return pid & ((1 << (15 - rgif)) - 1);
+}
+
+static inline uint16_t nvme_pid2rg(NvmeNamespace *ns, uint16_t pid)
+{
+ uint16_t rgif = ns->endgrp->fdp.rgif;
+
+ if (!rgif) {
+ return 0;
+ }
+
+ return pid >> (16 - rgif);
+}
+
+static inline bool nvme_parse_pid(NvmeNamespace *ns, uint16_t pid,
+ uint16_t *ph, uint16_t *rg)
+{
+ *rg = nvme_pid2rg(ns, pid);
+ *ph = nvme_pid2ph(ns, pid);
+
+ return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg);
+}
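A worked example with hypothetical values: assume the endurance group is configured with rgif = 4, i.e. the top four bits of a placement identifier select the reclaim group:

    /* Sketch only, mirroring nvme_make_pid()/nvme_pid2rg()/nvme_pid2ph() above */
    static void fdp_pid_example(void)
    {
        const unsigned rgif = 4;
        uint16_t pid = (3 << (16 - rgif)) | 5;          /* rg 3, ph 5 -> 0x3005 */
        uint16_t rg  = pid >> (16 - rgif);              /* 3 */
        uint16_t ph  = pid & ((1 << (15 - rgif)) - 1);  /* 5 */
        (void)rg; (void)ph;
    }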
+
static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
NvmeZoneState state)
{
@@ -376,6 +438,69 @@
return nvme_zns_check_resources(ns, act, opn, 0);
}
+static NvmeFdpEvent *nvme_fdp_alloc_event(NvmeCtrl *n, NvmeFdpEventBuffer *ebuf)
+{
+ NvmeFdpEvent *ret = NULL;
+ bool is_full = ebuf->next == ebuf->start && ebuf->nelems;
+
+ ret = &ebuf->events[ebuf->next++];
+ if (unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) {
+ ebuf->next = 0;
+ }
+ if (is_full) {
+ ebuf->start = ebuf->next;
+ } else {
+ ebuf->nelems++;
+ }
+
+ memset(ret, 0, sizeof(NvmeFdpEvent));
+ ret->timestamp = nvme_get_timestamp(n);
+
+ return ret;
+}
+
+static inline int log_event(NvmeRuHandle *ruh, uint8_t event_type)
+{
+ return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1;
+}
+
+static bool nvme_update_ruh(NvmeCtrl *n, NvmeNamespace *ns, uint16_t pid)
+{
+ NvmeEnduranceGroup *endgrp = ns->endgrp;
+ NvmeRuHandle *ruh;
+ NvmeReclaimUnit *ru;
+ NvmeFdpEvent *e = NULL;
+ uint16_t ph, rg, ruhid;
+
+ if (!nvme_parse_pid(ns, pid, &ph, &rg)) {
+ return false;
+ }
+
+ ruhid = ns->fdp.phs[ph];
+
+ ruh = &endgrp->fdp.ruhs[ruhid];
+ ru = &ruh->rus[rg];
+
+ if (ru->ruamw) {
+ if (log_event(ruh, FDP_EVT_RU_NOT_FULLY_WRITTEN)) {
+ e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events);
+ e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN;
+ e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV;
+ e->pid = cpu_to_le16(pid);
+ e->nsid = cpu_to_le32(ns->params.nsid);
+ e->rgid = cpu_to_le16(rg);
+ e->ruhid = cpu_to_le16(ruhid);
+ }
+
+ /* log (eventual) GC overhead of prematurely swapping the RU */
+ nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw));
+ }
+
+ ru->ruamw = ruh->ruamw;
+
+ return true;
+}
+
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
hwaddr hi, lo;
@@ -3320,6 +3445,41 @@
return status | NVME_DNR;
}
+static void nvme_do_write_fdp(NvmeCtrl *n, NvmeRequest *req, uint64_t slba,
+ uint32_t nlb)
+{
+ NvmeNamespace *ns = req->ns;
+ NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+ uint64_t data_size = nvme_l2b(ns, nlb);
+ uint32_t dw12 = le32_to_cpu(req->cmd.cdw12);
+ uint8_t dtype = (dw12 >> 20) & 0xf;
+ uint16_t pid = le16_to_cpu(rw->dspec);
+ uint16_t ph, rg, ruhid;
+ NvmeReclaimUnit *ru;
+
+ if (dtype != NVME_DIRECTIVE_DATA_PLACEMENT ||
+ !nvme_parse_pid(ns, pid, &ph, &rg)) {
+ ph = 0;
+ rg = 0;
+ }
+
+ ruhid = ns->fdp.phs[ph];
+ ru = &ns->endgrp->fdp.ruhs[ruhid].rus[rg];
+
+ nvme_fdp_stat_inc(&ns->endgrp->fdp.hbmw, data_size);
+ nvme_fdp_stat_inc(&ns->endgrp->fdp.mbmw, data_size);
+
+ while (nlb) {
+ if (nlb < ru->ruamw) {
+ ru->ruamw -= nlb;
+ break;
+ }
+
+ nlb -= ru->ruamw;
+ nvme_update_ruh(n, ns, pid);
+ }
+}
+
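A hypothetical trace of the loop above (all sizes in logical blocks, chosen for illustration): with nlb = 20, ru->ruamw = 8 writes left in the current reclaim unit and ruh->ruamw = 16 for a full unit:

    iteration 1: nlb (20) >= ruamw (8)  -> nlb = 12, nvme_update_ruh() moves to a fresh RU (ruamw = 16)
    iteration 2: nlb (12) <  ruamw (16) -> ruamw = 16 - 12 = 4, loop exits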
static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
bool wrz)
{
@@ -3429,6 +3589,8 @@
if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) {
zone->w_ptr += nlb;
}
+ } else if (ns->endgrp && ns->endgrp->fdp.enabled) {
+ nvme_do_write_fdp(n, req, slba, nlb);
}
data_offset = nvme_l2b(ns, slba);
@@ -4086,6 +4248,126 @@
return status;
}
+static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req,
+ size_t len)
+{
+ NvmeNamespace *ns = req->ns;
+ NvmeEnduranceGroup *endgrp;
+ NvmeRuhStatus *hdr;
+ NvmeRuhStatusDescr *ruhsd;
+ unsigned int nruhsd;
+ uint16_t rg, ph, *ruhid;
+ size_t trans_len;
+ g_autofree uint8_t *buf = NULL;
+
+ if (!n->subsys) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (ns->params.nsid == 0 || ns->params.nsid == 0xffffffff) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
+ if (!n->subsys->endgrp.fdp.enabled) {
+ return NVME_FDP_DISABLED | NVME_DNR;
+ }
+
+ endgrp = ns->endgrp;
+
+ nruhsd = ns->fdp.nphs * endgrp->fdp.nrg;
+ trans_len = sizeof(NvmeRuhStatus) + nruhsd * sizeof(NvmeRuhStatusDescr);
+ buf = g_malloc(trans_len);
+
+ trans_len = MIN(trans_len, len);
+
+ hdr = (NvmeRuhStatus *)buf;
+ ruhsd = (NvmeRuhStatusDescr *)(buf + sizeof(NvmeRuhStatus));
+
+ hdr->nruhsd = cpu_to_le16(nruhsd);
+
+ ruhid = ns->fdp.phs;
+
+ for (ph = 0; ph < ns->fdp.nphs; ph++, ruhid++) {
+ NvmeRuHandle *ruh = &endgrp->fdp.ruhs[*ruhid];
+
+ for (rg = 0; rg < endgrp->fdp.nrg; rg++, ruhsd++) {
+ uint16_t pid = nvme_make_pid(ns, rg, ph);
+
+ ruhsd->pid = cpu_to_le16(pid);
+ ruhsd->ruhid = *ruhid;
+ ruhsd->earutr = 0;
+ ruhsd->ruamw = cpu_to_le64(ruh->rus[rg].ruamw);
+ }
+ }
+
+ return nvme_c2h(n, buf, trans_len, req);
+}
+
+static uint16_t nvme_io_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+ uint32_t cdw10 = le32_to_cpu(cmd->cdw10);
+ uint32_t numd = le32_to_cpu(cmd->cdw11);
+ uint8_t mo = (cdw10 & 0xff);
+ size_t len = (numd + 1) << 2;
+
+ switch (mo) {
+ case NVME_IOMR_MO_NOP:
+ return 0;
+ case NVME_IOMR_MO_RUH_STATUS:
+ return nvme_io_mgmt_recv_ruhs(n, req, len);
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
+ };
+}
+
+static uint16_t nvme_io_mgmt_send_ruh_update(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+ NvmeNamespace *ns = req->ns;
+ uint32_t cdw10 = le32_to_cpu(cmd->cdw10);
+ uint16_t ret = NVME_SUCCESS;
+ uint32_t npid = (cdw10 >> 1) + 1;
+ unsigned int i = 0;
+ g_autofree uint16_t *pids = NULL;
+ uint32_t maxnpid = n->subsys->endgrp.fdp.nrg * n->subsys->endgrp.fdp.nruh;
+
+ if (unlikely(npid >= MIN(NVME_FDP_MAXPIDS, maxnpid))) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ pids = g_new(uint16_t, npid);
+
+ ret = nvme_h2c(n, pids, npid * sizeof(uint16_t), req);
+ if (ret) {
+ return ret;
+ }
+
+ for (; i < npid; i++) {
+ if (!nvme_update_ruh(n, ns, pids[i])) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+ }
+
+ return ret;
+}
+
+static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+ uint32_t cdw10 = le32_to_cpu(cmd->cdw10);
+ uint8_t mo = (cdw10 & 0xff);
+
+ switch (mo) {
+ case NVME_IOMS_MO_NOP:
+ return 0;
+ case NVME_IOMS_MO_RUH_UPDATE:
+ return nvme_io_mgmt_send_ruh_update(n, req);
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
+ };
+}
+
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns;
@@ -4162,6 +4444,10 @@
return nvme_zone_mgmt_send(n, req);
case NVME_CMD_ZONE_MGMT_RECV:
return nvme_zone_mgmt_recv(n, req);
+ case NVME_CMD_IO_MGMT_RECV:
+ return nvme_io_mgmt_recv(n, req);
+ case NVME_CMD_IO_MGMT_SEND:
+ return nvme_io_mgmt_send(n, req);
default:
assert(false);
}
@@ -4386,8 +4672,8 @@
{
BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);
- stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
- stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
+ stats->units_read += s->nr_bytes[BLOCK_ACCT_READ];
+ stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE];
stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
@@ -4401,6 +4687,7 @@
uint32_t trans_len;
NvmeNamespace *ns;
time_t current_ms;
+ uint64_t u_read, u_written;
if (off >= sizeof(smart)) {
return NVME_INVALID_FIELD | NVME_DNR;
@@ -4427,10 +4714,11 @@
trans_len = MIN(sizeof(smart) - off, buf_len);
smart.critical_warning = n->smart_critical_warning;
- smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
- 1000));
- smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
- 1000));
+ u_read = DIV_ROUND_UP(stats.units_read >> BDRV_SECTOR_BITS, 1000);
+ u_written = DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000);
+
+ smart.data_units_read[0] = cpu_to_le64(u_read);
+ smart.data_units_written[0] = cpu_to_le64(u_written);
smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);
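The unit conversion now happens at log time instead; for example (hypothetical figure), 1 GiB of reads accumulates stats.units_read = 1073741824 bytes, which the SMART log handler above turns into 1073741824 >> BDRV_SECTOR_BITS = 2097152 sectors and DIV_ROUND_UP(2097152, 1000) = 2098 reported data units.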
@@ -4452,6 +4740,48 @@
return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
}
+static uint16_t nvme_endgrp_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
+{
+ uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
+ uint16_t endgrpid = (dw11 >> 16) & 0xffff;
+ struct nvme_stats stats = {};
+ NvmeEndGrpLog info = {};
+ int i;
+
+ if (!n->subsys || endgrpid != 0x1) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (off >= sizeof(info)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
+ NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i);
+ if (!ns) {
+ continue;
+ }
+
+ nvme_set_blk_stats(ns, &stats);
+ }
+
+ info.data_units_read[0] =
+ cpu_to_le64(DIV_ROUND_UP(stats.units_read / 1000000000, 1000000000));
+ info.data_units_written[0] =
+ cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000));
+ info.media_units_written[0] =
+ cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000));
+
+ info.host_read_commands[0] = cpu_to_le64(stats.read_commands);
+ info.host_write_commands[0] = cpu_to_le64(stats.write_commands);
+
+ buf_len = MIN(sizeof(info) - off, buf_len);
+
+ return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req);
+}
+
+
static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
NvmeRequest *req)
{
@@ -4577,6 +4907,207 @@
return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
}
+static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss)
+{
+ size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr)
+ + vss;
+ return ROUND_UP(entry_siz, 8);
+}
+
+static uint16_t nvme_fdp_confs(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
+{
+ uint32_t log_size, trans_len;
+ g_autofree uint8_t *buf = NULL;
+ NvmeFdpDescrHdr *hdr;
+ NvmeRuhDescr *ruhd;
+ NvmeEnduranceGroup *endgrp;
+ NvmeFdpConfsHdr *log;
+ size_t nruh, fdp_descr_size;
+ int i;
+
+ if (endgrpid != 1 || !n->subsys) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ endgrp = &n->subsys->endgrp;
+
+ if (endgrp->fdp.enabled) {
+ nruh = endgrp->fdp.nruh;
+ } else {
+ nruh = 1;
+ }
+
+ fdp_descr_size = sizeof_fdp_conf_descr(nruh, FDPVSS);
+ log_size = sizeof(NvmeFdpConfsHdr) + fdp_descr_size;
+
+ if (off >= log_size) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ trans_len = MIN(log_size - off, buf_len);
+
+ buf = g_malloc0(log_size);
+ log = (NvmeFdpConfsHdr *)buf;
+ hdr = (NvmeFdpDescrHdr *)(log + 1);
+ ruhd = (NvmeRuhDescr *)(buf + sizeof(*log) + sizeof(*hdr));
+
+ log->num_confs = cpu_to_le16(0);
+ log->size = cpu_to_le32(log_size);
+
+ hdr->descr_size = cpu_to_le16(fdp_descr_size);
+ if (endgrp->fdp.enabled) {
+ hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1);
+ hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif);
+ hdr->nrg = cpu_to_le16(endgrp->fdp.nrg);
+ hdr->nruh = cpu_to_le16(endgrp->fdp.nruh);
+ hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1);
+ hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES);
+ hdr->runs = cpu_to_le64(endgrp->fdp.runs);
+
+ for (i = 0; i < nruh; i++) {
+ ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED;
+ ruhd++;
+ }
+ } else {
+ /* 1 bit for RUH in PIF -> 2 RUHs max. */
+ hdr->nrg = cpu_to_le16(1);
+ hdr->nruh = cpu_to_le16(1);
+ hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1);
+ hdr->nnss = cpu_to_le32(1);
+ hdr->runs = cpu_to_le64(96 * MiB);
+
+ ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED;
+ }
+
+ return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req);
+}
+
+static uint16_t nvme_fdp_ruh_usage(NvmeCtrl *n, uint32_t endgrpid,
+ uint32_t dw10, uint32_t dw12,
+ uint32_t buf_len, uint64_t off,
+ NvmeRequest *req)
+{
+ NvmeRuHandle *ruh;
+ NvmeRuhuLog *hdr;
+ NvmeRuhuDescr *ruhud;
+ NvmeEnduranceGroup *endgrp;
+ g_autofree uint8_t *buf = NULL;
+ uint32_t log_size, trans_len;
+ uint16_t i;
+
+ if (endgrpid != 1 || !n->subsys) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ endgrp = &n->subsys->endgrp;
+
+ if (!endgrp->fdp.enabled) {
+ return NVME_FDP_DISABLED | NVME_DNR;
+ }
+
+ log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr);
+
+ if (off >= log_size) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ trans_len = MIN(log_size - off, buf_len);
+
+ buf = g_malloc0(log_size);
+ hdr = (NvmeRuhuLog *)buf;
+ ruhud = (NvmeRuhuDescr *)(hdr + 1);
+
+ ruh = endgrp->fdp.ruhs;
+ hdr->nruh = cpu_to_le16(endgrp->fdp.nruh);
+
+ for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) {
+ ruhud->ruha = ruh->ruha;
+ }
+
+ return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req);
+}
+
+static uint16_t nvme_fdp_stats(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len,
+ uint64_t off, NvmeRequest *req)
+{
+ NvmeEnduranceGroup *endgrp;
+ NvmeFdpStatsLog log = {};
+ uint32_t trans_len;
+
+ if (off >= sizeof(NvmeFdpStatsLog)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (endgrpid != 1 || !n->subsys) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ if (!n->subsys->endgrp.fdp.enabled) {
+ return NVME_FDP_DISABLED | NVME_DNR;
+ }
+
+ endgrp = &n->subsys->endgrp;
+
+ trans_len = MIN(sizeof(log) - off, buf_len);
+
+ /* spec value is 128 bit, we only use 64 bit */
+ log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw);
+ log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw);
+ log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe);
+
+ return nvme_c2h(n, (uint8_t *)&log + off, trans_len, req);
+}
+
+static uint16_t nvme_fdp_events(NvmeCtrl *n, uint32_t endgrpid,
+ uint32_t buf_len, uint64_t off,
+ NvmeRequest *req)
+{
+ NvmeEnduranceGroup *endgrp;
+ NvmeCmd *cmd = &req->cmd;
+ bool host_events = (cmd->cdw10 >> 8) & 0x1;
+ uint32_t log_size, trans_len;
+ NvmeFdpEventBuffer *ebuf;
+ g_autofree NvmeFdpEventsLog *elog = NULL;
+ NvmeFdpEvent *event;
+
+ if (endgrpid != 1 || !n->subsys) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ endgrp = &n->subsys->endgrp;
+
+ if (!endgrp->fdp.enabled) {
+ return NVME_FDP_DISABLED | NVME_DNR;
+ }
+
+ if (host_events) {
+ ebuf = &endgrp->fdp.host_events;
+ } else {
+ ebuf = &endgrp->fdp.ctrl_events;
+ }
+
+ log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent);
+ trans_len = MIN(log_size - off, buf_len);
+ elog = g_malloc0(log_size);
+ elog->num_events = cpu_to_le32(ebuf->nelems);
+ event = (NvmeFdpEvent *)(elog + 1);
+
+ if (ebuf->nelems && ebuf->start == ebuf->next) {
+ unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start);
+ /* wrap over, copy [start;NVME_FDP_MAX_EVENTS[ and [0; next[ */
+ memcpy(event, &ebuf->events[ebuf->start],
+ sizeof(NvmeFdpEvent) * nelems);
+ memcpy(event + nelems, ebuf->events,
+ sizeof(NvmeFdpEvent) * ebuf->next);
+ } else if (ebuf->start < ebuf->next) {
+ memcpy(event, &ebuf->events[ebuf->start],
+ sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start));
+ }
+
+ return nvme_c2h(n, (uint8_t *)elog + off, trans_len, req);
+}
+
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
NvmeCmd *cmd = &req->cmd;
@@ -4589,13 +5120,14 @@
uint8_t lsp = (dw10 >> 8) & 0xf;
uint8_t rae = (dw10 >> 15) & 0x1;
uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24;
- uint32_t numdl, numdu;
+ uint32_t numdl, numdu, lspi;
uint64_t off, lpol, lpou;
size_t len;
uint16_t status;
numdl = (dw10 >> 16);
numdu = (dw11 & 0xffff);
+ lspi = (dw11 >> 16);
lpol = dw12;
lpou = dw13;
@@ -4624,6 +5156,16 @@
return nvme_changed_nslist(n, rae, len, off, req);
case NVME_LOG_CMD_EFFECTS:
return nvme_cmd_effects(n, csi, len, off, req);
+ case NVME_LOG_ENDGRP:
+ return nvme_endgrp_info(n, rae, len, off, req);
+ case NVME_LOG_FDP_CONFS:
+ return nvme_fdp_confs(n, lspi, len, off, req);
+ case NVME_LOG_FDP_RUH_USAGE:
+ return nvme_fdp_ruh_usage(n, lspi, dw10, dw12, len, off, req);
+ case NVME_LOG_FDP_STATS:
+ return nvme_fdp_stats(n, lspi, len, off, req);
+ case NVME_LOG_FDP_EVENTS:
+ return nvme_fdp_events(n, lspi, len, off, req);
default:
trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
return NVME_INVALID_FIELD | NVME_DNR;
@@ -5210,6 +5752,84 @@
return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
}
+static int nvme_get_feature_fdp(NvmeCtrl *n, uint32_t endgrpid,
+ uint32_t *result)
+{
+ *result = 0;
+
+ if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ *result = FIELD_DP16(0, FEAT_FDP, FDPE, 1);
+ *result = FIELD_DP16(*result, FEAT_FDP, CONF_NDX, 0);
+
+ return NVME_SUCCESS;
+}
+
+static uint16_t nvme_get_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns,
+ NvmeRequest *req, uint32_t *result)
+{
+ NvmeCmd *cmd = &req->cmd;
+ uint32_t cdw11 = le32_to_cpu(cmd->cdw11);
+ uint16_t ph = cdw11 & 0xffff;
+ uint8_t noet = (cdw11 >> 16) & 0xff;
+ uint16_t ruhid, ret;
+ uint32_t nentries = 0;
+ uint8_t s_events_ndx = 0;
+ size_t s_events_siz = sizeof(NvmeFdpEventDescr) * noet;
+ g_autofree NvmeFdpEventDescr *s_events = g_malloc0(s_events_siz);
+ NvmeRuHandle *ruh;
+ NvmeFdpEventDescr *s_event;
+
+ if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
+ return NVME_FDP_DISABLED | NVME_DNR;
+ }
+
+ if (!nvme_ph_valid(ns, ph)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ruhid = ns->fdp.phs[ph];
+ ruh = &n->subsys->endgrp.fdp.ruhs[ruhid];
+
+ assert(ruh);
+
+ if (unlikely(noet == 0)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ for (uint8_t event_type = 0; event_type < FDP_EVT_MAX; event_type++) {
+ uint8_t shift = nvme_fdp_evf_shifts[event_type];
+ if (!shift && event_type) {
+ /*
+ * only first entry (event_type == 0) has a shift value of 0
+ * other entries are simply unpopulated.
+ */
+ continue;
+ }
+
+ nentries++;
+
+ s_event = &s_events[s_events_ndx];
+ s_event->evt = event_type;
+ s_event->evta = (ruh->event_filter >> shift) & 0x1;
+
+ /* break if all `noet` entries are filled */
+ if ((++s_events_ndx) == noet) {
+ break;
+ }
+ }
+
+ ret = nvme_c2h(n, s_events, s_events_siz, req);
+ if (ret) {
+ return ret;
+ }
+
+ *result = nentries;
+ return NVME_SUCCESS;
+}
+
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
NvmeCmd *cmd = &req->cmd;
@@ -5222,6 +5842,7 @@
uint16_t iv;
NvmeNamespace *ns;
int i;
+ uint16_t endgrpid = 0, ret = NVME_SUCCESS;
static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
[NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
@@ -5319,6 +5940,33 @@
case NVME_HOST_BEHAVIOR_SUPPORT:
return nvme_c2h(n, (uint8_t *)&n->features.hbs,
sizeof(n->features.hbs), req);
+ case NVME_FDP_MODE:
+ endgrpid = dw11 & 0xff;
+
+ if (endgrpid != 0x1) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ret = nvme_get_feature_fdp(n, endgrpid, &result);
+ if (ret) {
+ return ret;
+ }
+ goto out;
+ case NVME_FDP_EVENTS:
+ if (!nvme_nsid_valid(n, nsid)) {
+ return NVME_INVALID_NSID | NVME_DNR;
+ }
+
+ ns = nvme_ns(n, nsid);
+ if (unlikely(!ns)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ret = nvme_get_feature_fdp_events(n, ns, req, &result);
+ if (ret) {
+ return ret;
+ }
+ goto out;
default:
break;
}
@@ -5352,6 +6000,20 @@
result |= NVME_INTVC_NOCOALESCING;
}
break;
+ case NVME_FDP_MODE:
+ endgrpid = dw11 & 0xff;
+
+ if (endgrpid != 0x1) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ret = nvme_get_feature_fdp(n, endgrpid, &result);
+ if (ret) {
+ return ret;
+ }
+ goto out;
+
+ break;
default:
result = nvme_feature_default[fid];
break;
@@ -5359,7 +6021,7 @@
out:
req->cqe.result = cpu_to_le32(result);
- return NVME_SUCCESS;
+ return ret;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
@@ -5377,6 +6039,51 @@
return NVME_SUCCESS;
}
+static uint16_t nvme_set_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns,
+ NvmeRequest *req)
+{
+ NvmeCmd *cmd = &req->cmd;
+ uint32_t cdw11 = le32_to_cpu(cmd->cdw11);
+ uint16_t ph = cdw11 & 0xffff;
+ uint8_t noet = (cdw11 >> 16) & 0xff;
+ uint16_t ret, ruhid;
+ uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1;
+ uint8_t event_mask = 0;
+ unsigned int i;
+ g_autofree uint8_t *events = g_malloc0(noet);
+ NvmeRuHandle *ruh = NULL;
+
+ assert(ns);
+
+ if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
+ return NVME_FDP_DISABLED | NVME_DNR;
+ }
+
+ if (!nvme_ph_valid(ns, ph)) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ruhid = ns->fdp.phs[ph];
+ ruh = &n->subsys->endgrp.fdp.ruhs[ruhid];
+
+ ret = nvme_h2c(n, events, noet, req);
+ if (ret) {
+ return ret;
+ }
+
+ for (i = 0; i < noet; i++) {
+ event_mask |= (1 << nvme_fdp_evf_shifts[events[i]]);
+ }
+
+ if (enable) {
+ ruh->event_filter |= event_mask;
+ } else {
+ ruh->event_filter = ruh->event_filter & ~event_mask;
+ }
+
+ return NVME_SUCCESS;
+}
+
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns = NULL;
@@ -5536,6 +6243,11 @@
return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
}
break;
+ case NVME_FDP_MODE:
+ /* spec: abort with cmd seq err if there are one or more namespaces in the endurance group */
+ return NVME_CMD_SEQ_ERROR | NVME_DNR;
+ case NVME_FDP_EVENTS:
+ return nvme_set_feature_fdp_events(n, ns, req);
default:
return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
}
@@ -6104,6 +6816,61 @@
return NVME_SUCCESS;
}
+static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req)
+{
+ return NVME_INVALID_FIELD | NVME_DNR;
+}
+
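+/* Directive Receive: only the Identify directive with the Return Parameters operation is supported */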
+static uint16_t nvme_directive_receive(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeNamespace *ns;
+ uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
+ uint32_t dw11 = le32_to_cpu(req->cmd.cdw11);
+ uint32_t nsid = le32_to_cpu(req->cmd.nsid);
+ uint8_t doper, dtype;
+ uint32_t numd, trans_len;
+ NvmeDirectiveIdentify id = {
+ .supported = 1 << NVME_DIRECTIVE_IDENTIFY,
+ .enabled = 1 << NVME_DIRECTIVE_IDENTIFY,
+ };
+
+ numd = dw10 + 1;
+ doper = dw11 & 0xff;
+ dtype = (dw11 >> 8) & 0xff;
+
+ trans_len = MIN(sizeof(NvmeDirectiveIdentify), numd << 2);
+
+ if (nsid == NVME_NSID_BROADCAST || dtype != NVME_DIRECTIVE_IDENTIFY ||
+ doper != NVME_DIRECTIVE_RETURN_PARAMS) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ ns = nvme_ns(n, nsid);
+ if (!ns) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ switch (dtype) {
+ case NVME_DIRECTIVE_IDENTIFY:
+ switch (doper) {
+ case NVME_DIRECTIVE_RETURN_PARAMS:
+ if (ns->endgrp->fdp.enabled) {
+ id.supported |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT;
+ id.enabled |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT;
+ id.persistent |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT;
+ }
+
+ return nvme_c2h(n, (uint8_t *)&id, trans_len, req);
+
+ default:
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ default:
+ return NVME_INVALID_FIELD;
+ }
+}
+
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
@@ -6152,6 +6919,10 @@
return nvme_dbbuf_config(n, req);
case NVME_ADM_CMD_FORMAT_NVM:
return nvme_format(n, req);
+ case NVME_ADM_CMD_DIRECTIVE_SEND:
+ return nvme_directive_send(n, req);
+ case NVME_ADM_CMD_DIRECTIVE_RECV:
+ return nvme_directive_receive(n, req);
default:
assert(false);
}
@@ -7380,6 +8151,7 @@
uint8_t *pci_conf = pci_dev->config;
uint64_t cap = ldq_le_p(&n->bar.cap);
NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
+ uint32_t ctratt;
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -7390,7 +8162,7 @@
id->cntlid = cpu_to_le16(n->cntlid);
id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
- id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS);
+ ctratt = NVME_CTRATT_ELBAS;
id->rab = 6;
@@ -7407,7 +8179,8 @@
id->mdts = n->params.mdts;
id->ver = cpu_to_le32(NVME_SPEC_VER);
id->oacs =
- cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF);
+ cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF |
+ NVME_OACS_DIRECTIVES);
id->cntrltype = 0x1;
/*
@@ -7457,8 +8230,17 @@
if (n->subsys) {
id->cmic |= NVME_CMIC_MULTI_CTRL;
+ ctratt |= NVME_CTRATT_ENDGRPS;
+
+ id->endgidmax = cpu_to_le16(0x1);
+
+ if (n->subsys->endgrp.fdp.enabled) {
+ ctratt |= NVME_CTRATT_FDPS;
+ }
}
+ id->ctratt = cpu_to_le32(ctratt);
+
NVME_CAP_SET_MQES(cap, 0x7ff);
NVME_CAP_SET_CQR(cap, 1);
NVME_CAP_SET_TO(cap, 0xf);
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 62a1f97..cfac960 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -14,8 +14,10 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
+#include "qemu/bitops.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
@@ -377,6 +379,130 @@
assert(ns->nr_open_zones == 0);
}
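+/* find the first reclaim unit handle with the given attribute and return its index via *ruhid */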
+static NvmeRuHandle *nvme_find_ruh_by_attr(NvmeEnduranceGroup *endgrp,
+ uint8_t ruha, uint16_t *ruhid)
+{
+ for (uint16_t i = 0; i < endgrp->fdp.nruh; i++) {
+ NvmeRuHandle *ruh = &endgrp->fdp.ruhs[i];
+
+ if (ruh->ruha == ruha) {
+ *ruhid = i;
+ return ruh;
+ }
+ }
+
+ return NULL;
+}
+
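+/* map namespace placement handles onto endurance group reclaim unit handles; without the fdp.ruhs parameter a single controller-assigned RUH is used */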
+static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp)
+{
+ NvmeEnduranceGroup *endgrp = ns->endgrp;
+ NvmeRuHandle *ruh;
+ uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
+ unsigned int *ruhid, *ruhids;
+ char *r, *p, *token;
+ uint16_t *ph;
+
+ if (!ns->params.fdp.ruhs) {
+ ns->fdp.nphs = 1;
+ ph = ns->fdp.phs = g_new(uint16_t, 1);
+
+ ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_CTRL, ph);
+ if (!ruh) {
+ ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_UNUSED, ph);
+ if (!ruh) {
+ error_setg(errp, "no unused reclaim unit handles left");
+ return false;
+ }
+
+ ruh->ruha = NVME_RUHA_CTRL;
+ ruh->lbafi = lbafi;
+ ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;
+
+ for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
+ ruh->rus[rg].ruamw = ruh->ruamw;
+ }
+ } else if (ruh->lbafi != lbafi) {
+ error_setg(errp, "lba format index of controller assigned "
+ "reclaim unit handle does not match namespace lba "
+ "format index");
+ return false;
+ }
+
+ return true;
+ }
+
+ ruhid = ruhids = g_new0(unsigned int, endgrp->fdp.nruh);
+ r = p = strdup(ns->params.fdp.ruhs);
+
+ /* parse the placement handle identifiers */
+ while ((token = qemu_strsep(&p, ";")) != NULL) {
+ ns->fdp.nphs += 1;
+ if (ns->fdp.nphs > NVME_FDP_MAXPIDS ||
+ ns->fdp.nphs == endgrp->fdp.nruh) {
+ error_setg(errp, "too many placement handles");
+ free(r);
+ return false;
+ }
+
+ if (qemu_strtoui(token, NULL, 0, ruhid++) < 0) {
+ error_setg(errp, "cannot parse reclaim unit handle identifier");
+ free(r);
+ return false;
+ }
+ }
+
+ free(r);
+
+ ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs);
+
+ ruhid = ruhids;
+
+ /* verify the identifiers */
+ for (unsigned int i = 0; i < ns->fdp.nphs; i++, ruhid++, ph++) {
+ if (*ruhid >= endgrp->fdp.nruh) {
+ error_setg(errp, "invalid reclaim unit handle identifier");
+ return false;
+ }
+
+ ruh = &endgrp->fdp.ruhs[*ruhid];
+
+ switch (ruh->ruha) {
+ case NVME_RUHA_UNUSED:
+ ruh->ruha = NVME_RUHA_HOST;
+ ruh->lbafi = lbafi;
+ ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;
+
+ for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
+ ruh->rus[rg].ruamw = ruh->ruamw;
+ }
+
+ break;
+
+ case NVME_RUHA_HOST:
+ if (ruh->lbafi != lbafi) {
+ error_setg(errp, "lba format index of host assigned"
+ "reclaim unit handle does not match namespace "
+ "lba format index");
+ return false;
+ }
+
+ break;
+
+ case NVME_RUHA_CTRL:
+ error_setg(errp, "reclaim unit handle is controller assigned");
+ return false;
+
+ default:
+ abort();
+ }
+
+ *ph = *ruhid;
+ }
+
+ return true;
+}
+
static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
{
unsigned int pi_size;
@@ -417,6 +543,11 @@
return -1;
}
+ if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) {
+ error_setg(errp, "cannot be a zoned- in an FDP configuration");
+ return -1;
+ }
+
if (ns->params.zoned) {
if (ns->params.max_active_zones) {
if (ns->params.max_open_zones > ns->params.max_active_zones) {
@@ -502,6 +633,12 @@
nvme_ns_init_zoned(ns);
}
+ if (ns->endgrp && ns->endgrp->fdp.enabled) {
+ if (!nvme_ns_init_fdp(ns, errp)) {
+ return -1;
+ }
+ }
+
return 0;
}
@@ -525,6 +662,10 @@
g_free(ns->zone_array);
g_free(ns->zd_extensions);
}
+
+ if (ns->endgrp && ns->endgrp->fdp.enabled) {
+ g_free(ns->fdp.phs);
+ }
}
static void nvme_ns_unrealize(DeviceState *dev)
@@ -561,6 +702,8 @@
if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
return;
}
+ ns->subsys = subsys;
+ ns->endgrp = &subsys->endgrp;
}
if (nvme_ns_setup(ns, errp)) {
@@ -591,6 +734,8 @@
if (subsys) {
subsys->namespaces[nsid] = ns;
+ ns->id_ns.endgid = cpu_to_le16(0x1);
+
if (ns->params.detached) {
return;
}
@@ -606,6 +751,7 @@
return;
}
+
}
nvme_attach_ns(n, ns);
@@ -644,6 +790,7 @@
DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1),
DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default,
false),
+ DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 16da27a..209e8f5 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -27,6 +27,8 @@
#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES 256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
+#define NVME_FDP_MAX_EVENTS 63
+#define NVME_FDP_MAXPIDS 128
QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);
@@ -45,17 +47,68 @@
OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
#define SUBSYS_SLOT_RSVD (void *)0xFFFF
+typedef struct NvmeReclaimUnit {
+ uint64_t ruamw;
+} NvmeReclaimUnit;
+
+typedef struct NvmeRuHandle {
+ uint8_t ruht;
+ uint8_t ruha;
+ uint64_t event_filter;
+ uint8_t lbafi;
+ uint64_t ruamw;
+
+ /* reclaim units indexed by reclaim group */
+ NvmeReclaimUnit *rus;
+} NvmeRuHandle;
+
+typedef struct NvmeFdpEventBuffer {
+ NvmeFdpEvent events[NVME_FDP_MAX_EVENTS];
+ unsigned int nelems;
+ unsigned int start;
+ unsigned int next;
+} NvmeFdpEventBuffer;
+
+typedef struct NvmeEnduranceGroup {
+ uint8_t event_conf;
+
+ struct {
+ NvmeFdpEventBuffer host_events, ctrl_events;
+
+ uint16_t nruh;
+ uint16_t nrg;
+ uint8_t rgif;
+ uint64_t runs;
+
+ uint64_t hbmw;
+ uint64_t mbmw;
+ uint64_t mbe;
+
+ bool enabled;
+
+ NvmeRuHandle *ruhs;
+ } fdp;
+} NvmeEnduranceGroup;
+
typedef struct NvmeSubsystem {
DeviceState parent_obj;
NvmeBus bus;
uint8_t subnqn[256];
char *serial;
- NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
- NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
+ NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
+ NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
+ NvmeEnduranceGroup endgrp;
struct {
char *nqn;
+
+ struct {
+ bool enabled;
+ uint64_t runs;
+ uint16_t nruh;
+ uint32_t nrg;
+ } fdp;
} params;
} NvmeSubsystem;
@@ -96,6 +149,21 @@
QTAILQ_ENTRY(NvmeZone) entry;
} NvmeZone;
+#define FDP_EVT_MAX 0xff
+#define NVME_FDP_MAX_NS_RUHS 32u
+#define FDPVSS 0
+
+static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
+ /* Host events */
+ [FDP_EVT_RU_NOT_FULLY_WRITTEN] = 0,
+ [FDP_EVT_RU_ATL_EXCEEDED] = 1,
+ [FDP_EVT_CTRL_RESET_RUH] = 2,
+ [FDP_EVT_INVALID_PID] = 3,
+ /* CTRL events */
+ [FDP_EVT_MEDIA_REALLOC] = 32,
+ [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33,
+};
+
typedef struct NvmeNamespaceParams {
bool detached;
bool shared;
@@ -125,6 +193,10 @@
uint32_t numzrwa;
uint64_t zrwas;
uint64_t zrwafg;
+
+ struct {
+ char *ruhs;
+ } fdp;
} NvmeNamespaceParams;
typedef struct NvmeNamespace {
@@ -167,10 +239,18 @@
int32_t nr_active_zones;
NvmeNamespaceParams params;
+ NvmeSubsystem *subsys;
+ NvmeEnduranceGroup *endgrp;
struct {
uint32_t err_rec;
} features;
+
+ struct {
+ uint16_t nphs;
+ /* reclaim unit handle identifiers indexed by placement handle */
+ uint16_t *phs;
+ } fdp;
} NvmeNamespace;
static inline uint32_t nvme_nsid(NvmeNamespace *ns)
@@ -274,6 +354,12 @@
assert(ns->nr_active_zones >= 0);
}
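+/* saturating add for the FDP statistics counters; clamps to UINT64_MAX on overflow */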
+static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
+{
+ uint64_t ret = *a + b;
+ *a = ret < *a ? UINT64_MAX : ret;
+}
+
void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
@@ -340,7 +426,9 @@
case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES";
case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ";
case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT";
+ case NVME_ADM_CMD_DIRECTIVE_SEND: return "NVME_ADM_CMD_DIRECTIVE_SEND";
case NVME_ADM_CMD_VIRT_MNGMT: return "NVME_ADM_CMD_VIRT_MNGMT";
+ case NVME_ADM_CMD_DIRECTIVE_RECV: return "NVME_ADM_CMD_DIRECTIVE_RECV";
case NVME_ADM_CMD_DBBUF_CONFIG: return "NVME_ADM_CMD_DBBUF_CONFIG";
case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM";
default: return "NVME_ADM_CMD_UNKNOWN";
diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c
index 9d26436..24ddec8 100644
--- a/hw/nvme/subsys.c
+++ b/hw/nvme/subsys.c
@@ -7,10 +7,13 @@
*/
#include "qemu/osdep.h"
+#include "qemu/units.h"
#include "qapi/error.h"
#include "nvme.h"
+#define NVME_DEFAULT_RU_SIZE (96 * MiB)
+
static int nvme_subsys_reserve_cntlids(NvmeCtrl *n, int start, int num)
{
NvmeSubsystem *subsys = n->subsys;
@@ -109,13 +112,95 @@
n->cntlid = -1;
}
-static void nvme_subsys_setup(NvmeSubsystem *subsys)
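+/* derive the reclaim group identifier format: the number of placement identifier bits used for the reclaim group index */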
+static bool nvme_calc_rgif(uint16_t nruh, uint16_t nrg, uint8_t *rgif)
+{
+ uint16_t val;
+ unsigned int i;
+
+ if (unlikely(nrg == 1)) {
+ /* PIDRG_NORGI scenario, all of pid is used for PHID */
+ *rgif = 0;
+ return true;
+ }
+
+ val = nrg;
+ i = 0;
+ while (val) {
+ val >>= 1;
+ i++;
+ }
+ *rgif = i;
+
+ /* ensure remaining bits suffice to represent number of phids in a RG */
+ if (unlikely((UINT16_MAX >> i) < nruh)) {
+ *rgif = 0;
+ return false;
+ }
+
+ return true;
+}
+
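+/* validate fdp.runs, fdp.nrg and fdp.nruh and allocate the endurance group reclaim unit handles */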
+static bool nvme_subsys_setup_fdp(NvmeSubsystem *subsys, Error **errp)
+{
+ NvmeEnduranceGroup *endgrp = &subsys->endgrp;
+
+ if (!subsys->params.fdp.runs) {
+ error_setg(errp, "fdp.runs must be non-zero");
+ return false;
+ }
+
+ endgrp->fdp.runs = subsys->params.fdp.runs;
+
+ if (!subsys->params.fdp.nrg) {
+ error_setg(errp, "fdp.nrg must be non-zero");
+ return false;
+ }
+
+ endgrp->fdp.nrg = subsys->params.fdp.nrg;
+
+ if (!subsys->params.fdp.nruh) {
+ error_setg(errp, "fdp.nruh must be non-zero");
+ return false;
+ }
+
+ endgrp->fdp.nruh = subsys->params.fdp.nruh;
+
+ if (!nvme_calc_rgif(endgrp->fdp.nruh, endgrp->fdp.nrg, &endgrp->fdp.rgif)) {
+ error_setg(errp,
+ "cannot derive a valid rgif (nruh %"PRIu16" nrg %"PRIu32")",
+ endgrp->fdp.nruh, endgrp->fdp.nrg);
+ return false;
+ }
+
+ endgrp->fdp.ruhs = g_new(NvmeRuHandle, endgrp->fdp.nruh);
+
+ for (uint16_t ruhid = 0; ruhid < endgrp->fdp.nruh; ruhid++) {
+ endgrp->fdp.ruhs[ruhid] = (NvmeRuHandle) {
+ .ruht = NVME_RUHT_INITIALLY_ISOLATED,
+ .ruha = NVME_RUHA_UNUSED,
+ };
+
+ endgrp->fdp.ruhs[ruhid].rus = g_new(NvmeReclaimUnit, endgrp->fdp.nrg);
+ }
+
+ endgrp->fdp.enabled = true;
+
+ return true;
+}
+
+static bool nvme_subsys_setup(NvmeSubsystem *subsys, Error **errp)
{
const char *nqn = subsys->params.nqn ?
subsys->params.nqn : subsys->parent_obj.id;
snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn),
"nqn.2019-08.org.qemu:%s", nqn);
+
+ if (subsys->params.fdp.enabled && !nvme_subsys_setup_fdp(subsys, errp)) {
+ return false;
+ }
+
+ return true;
}
static void nvme_subsys_realize(DeviceState *dev, Error **errp)
@@ -124,11 +209,16 @@
qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);
- nvme_subsys_setup(subsys);
+ nvme_subsys_setup(subsys, errp);
}
static Property nvme_subsystem_props[] = {
DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn),
+ DEFINE_PROP_BOOL("fdp", NvmeSubsystem, params.fdp.enabled, false),
+ DEFINE_PROP_SIZE("fdp.runs", NvmeSubsystem, params.fdp.runs,
+ NVME_DEFAULT_RU_SIZE),
+ DEFINE_PROP_UINT32("fdp.nrg", NvmeSubsystem, params.fdp.nrg, 1),
+ DEFINE_PROP_UINT16("fdp.nruh", NvmeSubsystem, params.fdp.nruh, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events
index b16f226..7f7837e 100644
--- a/hw/nvme/trace-events
+++ b/hw/nvme/trace-events
@@ -117,6 +117,7 @@
pci_nvme_zoned_zrwa_implicit_flush(uint64_t zslba, uint32_t nlb) "zslba 0x%"PRIx64" nlb %"PRIu32""
pci_nvme_pci_reset(void) "PCI Function Level Reset"
pci_nvme_virt_mngmt(uint16_t cid, uint16_t act, uint16_t cntlid, const char* rt, uint16_t nr) "cid %"PRIu16", act=0x%"PRIx16", ctrlid=%"PRIu16" %s nr=%"PRIu16""
+pci_nvme_fdp_ruh_change(uint16_t rgid, uint16_t ruhid) "change RU on RUH rgid=%"PRIu16", ruhid=%"PRIu16""
# error conditions
pci_nvme_err_mdts(size_t len) "len %zu"
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 8db0532..85c93cf 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -57,6 +57,7 @@
#include <sys/ioctl.h>
#include "hw/pci/pci.h"
+#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/xen/xen.h"
@@ -780,15 +781,6 @@
s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
s->dev.devfn);
- xen_host_pci_device_get(&s->real_device,
- s->hostaddr.domain, s->hostaddr.bus,
- s->hostaddr.slot, s->hostaddr.function,
- errp);
- if (*errp) {
- error_append_hint(errp, "Failed to \"open\" the real pci device");
- return;
- }
-
s->is_virtfn = s->real_device.is_virtfn;
if (s->is_virtfn) {
XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
@@ -803,8 +795,10 @@
s->io_listener = xen_pt_io_listener;
/* Setup VGA bios for passthrough GFX */
- if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
- (s->real_device.dev == 2) && (s->real_device.func == 0)) {
+ if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) &&
+ (s->real_device.bus == XEN_PCI_IGD_BUS) &&
+ (s->real_device.dev == XEN_PCI_IGD_DEV) &&
+ (s->real_device.func == XEN_PCI_IGD_FN)) {
if (!is_igd_vga_passthrough(&s->real_device)) {
error_setg(errp, "Need to enable igd-passthru if you're trying"
" to passthrough IGD GFX");
@@ -950,11 +944,58 @@
PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
+void xen_igd_reserve_slot(PCIBus *pci_bus)
+{
+ if (!xen_igd_gfx_pt_enabled()) {
+ return;
+ }
+
+ XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n");
+ pci_bus->slot_reserved_mask |= XEN_PCI_IGD_SLOT_MASK;
+}
+
+static void xen_igd_clear_slot(DeviceState *qdev, Error **errp)
+{
+ ERRP_GUARD();
+ PCIDevice *pci_dev = (PCIDevice *)qdev;
+ XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev);
+ XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s);
+ PCIBus *pci_bus = pci_get_bus(pci_dev);
+
+ xen_host_pci_device_get(&s->real_device,
+ s->hostaddr.domain, s->hostaddr.bus,
+ s->hostaddr.slot, s->hostaddr.function,
+ errp);
+ if (*errp) {
+ error_append_hint(errp, "Failed to \"open\" the real pci device");
+ return;
+ }
+
+ if (!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) {
+ xpdc->pci_qdev_realize(qdev, errp);
+ return;
+ }
+
+ if (is_igd_vga_passthrough(&s->real_device) &&
+ s->real_device.domain == XEN_PCI_IGD_DOMAIN &&
+ s->real_device.bus == XEN_PCI_IGD_BUS &&
+ s->real_device.dev == XEN_PCI_IGD_DEV &&
+ s->real_device.func == XEN_PCI_IGD_FN &&
+ s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) {
+ pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK;
+ XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n");
+ }
+ xpdc->pci_qdev_realize(qdev, errp);
+}
+
static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass);
+ xpdc->pci_qdev_realize = dc->realize;
+ dc->realize = xen_igd_clear_slot;
k->realize = xen_pt_realize;
k->exit = xen_pt_unregister_device;
k->config_read = xen_pt_pci_read_config;
@@ -977,6 +1018,7 @@
.instance_size = sizeof(XenPCIPassthroughState),
.instance_finalize = xen_pci_passthrough_finalize,
.class_init = xen_pci_passthrough_class_init,
+ .class_size = sizeof(XenPTDeviceClass),
.instance_init = xen_pci_passthrough_instance_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h
index cf10fc7..e184699 100644
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -40,7 +40,20 @@
#define TYPE_XEN_PT_DEVICE "xen-pci-passthrough"
OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE)
+#define XEN_PT_DEVICE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE)
+#define XEN_PT_DEVICE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE)
+
+typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp);
+
+typedef struct XenPTDeviceClass {
+ PCIDeviceClass parent_class;
+ XenPTQdevRealize pci_qdev_realize;
+} XenPTDeviceClass;
+
uint32_t igd_read_opregion(XenPCIPassthroughState *s);
+void xen_igd_reserve_slot(PCIBus *pci_bus);
void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val);
void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
XenHostPCIDevice *dev);
@@ -75,6 +88,13 @@
#define XEN_PCI_INTEL_OPREGION 0xfc
+#define XEN_PCI_IGD_DOMAIN 0
+#define XEN_PCI_IGD_BUS 0
+#define XEN_PCI_IGD_DEV 2
+#define XEN_PCI_IGD_FN 0
+#define XEN_PCI_IGD_SLOT_MASK \
+ (1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN)))
+
typedef enum {
XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */
XEN_PT_GRP_TYPE_EMU, /* emul reg group */
diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c
index cde898b..8b9b554 100644
--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -1924,7 +1924,7 @@
if (reg->init) {
uint32_t host_mask, size_mask;
unsigned int offset;
- uint32_t val;
+ uint32_t val = 0;
/* initialize emulate register */
rc = reg->init(s, reg_entry->reg,
diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c
index 2d8cac8..5c10844 100644
--- a/hw/xen/xen_pt_stub.c
+++ b/hw/xen/xen_pt_stub.c
@@ -20,3 +20,7 @@
error_setg(errp, "Xen PCI passthrough support not built in");
}
}
+
+void xen_igd_reserve_slot(PCIBus *pci_bus)
+{
+}
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 8027b71..bb231d0 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -1,6 +1,8 @@
#ifndef BLOCK_NVME_H
#define BLOCK_NVME_H
+#include "hw/registerfields.h"
+
typedef struct QEMU_PACKED NvmeBar {
uint64_t cap;
uint32_t vs;
@@ -58,6 +60,24 @@
NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
};
+typedef struct QEMU_PACKED NvmeEndGrpLog {
+ uint8_t critical_warning;
+ uint8_t rsvd[2];
+ uint8_t avail_spare;
+ uint8_t avail_spare_thres;
+ uint8_t percet_used;
+ uint8_t rsvd1[26];
+ uint64_t end_estimate[2];
+ uint64_t data_units_read[2];
+ uint64_t data_units_written[2];
+ uint64_t media_units_written[2];
+ uint64_t host_read_commands[2];
+ uint64_t host_write_commands[2];
+ uint64_t media_integrity_errors[2];
+ uint64_t no_err_info_log_entries[2];
+ uint8_t rsvd2[352];
+} NvmeEndGrpLog;
+
enum NvmeCapShift {
CAP_MQES_SHIFT = 0,
CAP_CQR_SHIFT = 16,
@@ -595,7 +615,9 @@
NVME_ADM_CMD_ACTIVATE_FW = 0x10,
NVME_ADM_CMD_DOWNLOAD_FW = 0x11,
NVME_ADM_CMD_NS_ATTACHMENT = 0x15,
+ NVME_ADM_CMD_DIRECTIVE_SEND = 0x19,
NVME_ADM_CMD_VIRT_MNGMT = 0x1c,
+ NVME_ADM_CMD_DIRECTIVE_RECV = 0x1a,
NVME_ADM_CMD_DBBUF_CONFIG = 0x7c,
NVME_ADM_CMD_FORMAT_NVM = 0x80,
NVME_ADM_CMD_SECURITY_SEND = 0x81,
@@ -611,7 +633,9 @@
NVME_CMD_WRITE_ZEROES = 0x08,
NVME_CMD_DSM = 0x09,
NVME_CMD_VERIFY = 0x0c,
+ NVME_CMD_IO_MGMT_RECV = 0x12,
NVME_CMD_COPY = 0x19,
+ NVME_CMD_IO_MGMT_SEND = 0x1d,
NVME_CMD_ZONE_MGMT_SEND = 0x79,
NVME_CMD_ZONE_MGMT_RECV = 0x7a,
NVME_CMD_ZONE_APPEND = 0x7d,
@@ -704,7 +728,9 @@
uint64_t slba;
uint16_t nlb;
uint16_t control;
- uint32_t dsmgmt;
+ uint8_t dsmgmt;
+ uint8_t rsvd;
+ uint16_t dspec;
uint32_t reftag;
uint16_t apptag;
uint16_t appmask;
@@ -875,6 +901,8 @@
NVME_INVALID_PRP_OFFSET = 0x0013,
NVME_CMD_SET_CMB_REJECTED = 0x002b,
NVME_INVALID_CMD_SET = 0x002c,
+ NVME_FDP_DISABLED = 0x0029,
+ NVME_INVALID_PHID_LIST = 0x002a,
NVME_LBA_RANGE = 0x0080,
NVME_CAP_EXCEEDED = 0x0081,
NVME_NS_NOT_READY = 0x0082,
@@ -1005,11 +1033,16 @@
};
enum NvmeLogIdentifier {
- NVME_LOG_ERROR_INFO = 0x01,
- NVME_LOG_SMART_INFO = 0x02,
- NVME_LOG_FW_SLOT_INFO = 0x03,
- NVME_LOG_CHANGED_NSLIST = 0x04,
- NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ERROR_INFO = 0x01,
+ NVME_LOG_SMART_INFO = 0x02,
+ NVME_LOG_FW_SLOT_INFO = 0x03,
+ NVME_LOG_CHANGED_NSLIST = 0x04,
+ NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ENDGRP = 0x09,
+ NVME_LOG_FDP_CONFS = 0x20,
+ NVME_LOG_FDP_RUH_USAGE = 0x21,
+ NVME_LOG_FDP_STATS = 0x22,
+ NVME_LOG_FDP_EVENTS = 0x23,
};
typedef struct QEMU_PACKED NvmePSD {
@@ -1091,7 +1124,10 @@
uint16_t mntmt;
uint16_t mxtmt;
uint32_t sanicap;
- uint8_t rsvd332[180];
+ uint8_t rsvd332[6];
+ uint16_t nsetidmax;
+ uint16_t endgidmax;
+ uint8_t rsvd342[170];
uint8_t sqes;
uint8_t cqes;
uint16_t maxcmd;
@@ -1134,15 +1170,18 @@
};
enum NvmeIdCtrlCtratt {
+ NVME_CTRATT_ENDGRPS = 1 << 4,
NVME_CTRATT_ELBAS = 1 << 15,
+ NVME_CTRATT_FDPS = 1 << 19,
};
enum NvmeIdCtrlOacs {
- NVME_OACS_SECURITY = 1 << 0,
- NVME_OACS_FORMAT = 1 << 1,
- NVME_OACS_FW = 1 << 2,
- NVME_OACS_NS_MGMT = 1 << 3,
- NVME_OACS_DBBUF = 1 << 8,
+ NVME_OACS_SECURITY = 1 << 0,
+ NVME_OACS_FORMAT = 1 << 1,
+ NVME_OACS_FW = 1 << 2,
+ NVME_OACS_NS_MGMT = 1 << 3,
+ NVME_OACS_DIRECTIVES = 1 << 5,
+ NVME_OACS_DBBUF = 1 << 8,
};
enum NvmeIdCtrlOncs {
@@ -1227,6 +1266,7 @@
#define NVME_AEC_SMART(aec) (aec & 0xff)
#define NVME_AEC_NS_ATTR(aec) ((aec >> 8) & 0x1)
#define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1)
+#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1)
#define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff)
#define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
@@ -1246,6 +1286,8 @@
NVME_TIMESTAMP = 0xe,
NVME_HOST_BEHAVIOR_SUPPORT = 0x16,
NVME_COMMAND_SET_PROFILE = 0x19,
+ NVME_FDP_MODE = 0x1d,
+ NVME_FDP_EVENTS = 0x1e,
NVME_SOFTWARE_PROGRESS_MARKER = 0x80,
NVME_FID_MAX = 0x100,
};
@@ -1338,7 +1380,10 @@
uint16_t mssrl;
uint32_t mcl;
uint8_t msrc;
- uint8_t rsvd81[23];
+ uint8_t rsvd81[18];
+ uint8_t nsattr;
+ uint16_t nvmsetid;
+ uint16_t endgid;
uint8_t nguid[16];
uint64_t eui64;
NvmeLBAF lbaf[NVME_MAX_NLBAF];
@@ -1617,6 +1662,169 @@
NVME_VIRT_RES_INTERRUPT = 0x01,
} NvmeVirtualResourceType;
+typedef struct NvmeDirectiveIdentify {
+ uint8_t supported;
+ uint8_t unused1[31];
+ uint8_t enabled;
+ uint8_t unused33[31];
+ uint8_t persistent;
+ uint8_t unused65[31];
+ uint8_t rsvd64[4000];
+} NvmeDirectiveIdentify;
+
+enum NvmeDirectiveTypes {
+ NVME_DIRECTIVE_IDENTIFY = 0x0,
+ NVME_DIRECTIVE_DATA_PLACEMENT = 0x2,
+};
+
+enum NvmeDirectiveOperations {
+ NVME_DIRECTIVE_RETURN_PARAMS = 0x1,
+};
+
+typedef struct QEMU_PACKED NvmeFdpConfsHdr {
+ uint16_t num_confs;
+ uint8_t version;
+ uint8_t rsvd3;
+ uint32_t size;
+ uint8_t rsvd8[8];
+} NvmeFdpConfsHdr;
+
+REG8(FDPA, 0x0)
+ FIELD(FDPA, RGIF, 0, 4)
+ FIELD(FDPA, VWC, 4, 1)
+ FIELD(FDPA, VALID, 7, 1);
+
+typedef struct QEMU_PACKED NvmeFdpDescrHdr {
+ uint16_t descr_size;
+ uint8_t fdpa;
+ uint8_t vss;
+ uint32_t nrg;
+ uint16_t nruh;
+ uint16_t maxpids;
+ uint32_t nnss;
+ uint64_t runs;
+ uint32_t erutl;
+ uint8_t rsvd28[36];
+} NvmeFdpDescrHdr;
+
+enum NvmeRuhType {
+ NVME_RUHT_INITIALLY_ISOLATED = 1,
+ NVME_RUHT_PERSISTENTLY_ISOLATED = 2,
+};
+
+typedef struct QEMU_PACKED NvmeRuhDescr {
+ uint8_t ruht;
+ uint8_t rsvd1[3];
+} NvmeRuhDescr;
+
+typedef struct QEMU_PACKED NvmeRuhuLog {
+ uint16_t nruh;
+ uint8_t rsvd2[6];
+} NvmeRuhuLog;
+
+enum NvmeRuhAttributes {
+ NVME_RUHA_UNUSED = 0,
+ NVME_RUHA_HOST = 1,
+ NVME_RUHA_CTRL = 2,
+};
+
+typedef struct QEMU_PACKED NvmeRuhuDescr {
+ uint8_t ruha;
+ uint8_t rsvd1[7];
+} NvmeRuhuDescr;
+
+typedef struct QEMU_PACKED NvmeFdpStatsLog {
+ uint64_t hbmw[2];
+ uint64_t mbmw[2];
+ uint64_t mbe[2];
+ uint8_t rsvd48[16];
+} NvmeFdpStatsLog;
+
+typedef struct QEMU_PACKED NvmeFdpEventsLog {
+ uint32_t num_events;
+ uint8_t rsvd4[60];
+} NvmeFdpEventsLog;
+
+enum NvmeFdpEventType {
+ FDP_EVT_RU_NOT_FULLY_WRITTEN = 0x0,
+ FDP_EVT_RU_ATL_EXCEEDED = 0x1,
+ FDP_EVT_CTRL_RESET_RUH = 0x2,
+ FDP_EVT_INVALID_PID = 0x3,
+ FDP_EVT_MEDIA_REALLOC = 0x80,
+ FDP_EVT_RUH_IMPLICIT_RU_CHANGE = 0x81,
+};
+
+enum NvmeFdpEventFlags {
+ FDPEF_PIV = 1 << 0,
+ FDPEF_NSIDV = 1 << 1,
+ FDPEF_LV = 1 << 2,
+};
+
+typedef struct QEMU_PACKED NvmeFdpEvent {
+ uint8_t type;
+ uint8_t flags;
+ uint16_t pid;
+ uint64_t timestamp;
+ uint32_t nsid;
+ uint64_t type_specific[2];
+ uint16_t rgid;
+ uint8_t ruhid;
+ uint8_t rsvd35[5];
+ uint64_t vendor[3];
+} NvmeFdpEvent;
+
+typedef struct QEMU_PACKED NvmePhidList {
+ uint16_t nnruhd;
+ uint8_t rsvd2[6];
+} NvmePhidList;
+
+typedef struct QEMU_PACKED NvmePhidDescr {
+ uint8_t ruht;
+ uint8_t rsvd1;
+ uint16_t ruhid;
+} NvmePhidDescr;
+
+REG32(FEAT_FDP, 0x0)
+ FIELD(FEAT_FDP, FDPE, 0, 1)
+ FIELD(FEAT_FDP, CONF_NDX, 8, 8);
+
+typedef struct QEMU_PACKED NvmeFdpEventDescr {
+ uint8_t evt;
+ uint8_t evta;
+} NvmeFdpEventDescr;
+
+REG32(NVME_IOMR, 0x0)
+ FIELD(NVME_IOMR, MO, 0, 8)
+ FIELD(NVME_IOMR, MOS, 16, 16);
+
+enum NvmeIomr2Mo {
+ NVME_IOMR_MO_NOP = 0x0,
+ NVME_IOMR_MO_RUH_STATUS = 0x1,
+ NVME_IOMR_MO_VENDOR_SPECIFIC = 0x255,
+};
+
+typedef struct QEMU_PACKED NvmeRuhStatus {
+ uint8_t rsvd0[14];
+ uint16_t nruhsd;
+} NvmeRuhStatus;
+
+typedef struct QEMU_PACKED NvmeRuhStatusDescr {
+ uint16_t pid;
+ uint16_t ruhid;
+ uint32_t earutr;
+ uint64_t ruamw;
+ uint8_t rsvd16[16];
+} NvmeRuhStatusDescr;
+
+REG32(NVME_IOMS, 0x0)
+ FIELD(NVME_IOMS, MO, 0, 8)
+ FIELD(NVME_IOMS, MOS, 16, 16);
+
+enum NvmeIoms2Mo {
+ NVME_IOMS_MO_NOP = 0x0,
+ NVME_IOMS_MO_RUH_UPDATE = 0x1,
+};
+
static inline void _nvme_check_size(void)
{
QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096);
@@ -1655,5 +1863,7 @@
QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeDirectiveIdentify) != 4096);
}
#endif