accel/tcg: Disconnect TargetPageDataNode from page size

Dynamically size the node for the runtime target page size: replace the
fixed data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] member with a flexible
array member, size the allocation when the node is created, and compute
per-page offsets into the flat array explicitly.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Acked-by: Helge Deller <deller@gmx.de>
Message-Id: <20240102015808.132373-29-richard.henderson@linaro.org>
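Not from the patch itself: a self-contained sketch of the flexible-array-member
pattern the node switches to, with plain calloc standing in for g_malloc0 and
invented placeholder fields and sizes.

#include <stdlib.h>

typedef struct Node {
    unsigned long start, last;            /* stand-in bookkeeping fields */
    char data[] __attribute__((aligned)); /* flexible array member */
} Node;

/* Allocate header plus pages * page_data_size bytes in one zeroed block,
 * so the data area can be sized from a value known only at runtime. */
Node *node_new(size_t pages, size_t page_data_size)
{
    return calloc(1, sizeof(Node) + pages * page_data_size);
}

/* Equivalent of indexing a two-dimensional data[pages][page_data_size]
 * array: the byte offset of page i is i * page_data_size. */
void *node_page_data(Node *n, size_t i, size_t page_data_size)
{
    return n->data + i * page_data_size;
}

A node covering TPD_PAGES pages would then be created as
node_new(TPD_PAGES, TARGET_PAGE_DATA_SIZE), mirroring the g_malloc0() call
in the hunk below.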
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 69b7429..3cac3a7 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -864,7 +864,7 @@
 typedef struct TargetPageDataNode {
     struct rcu_head rcu;
     IntervalTreeNode itree;
-    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
+    char data[] __attribute__((aligned));
 } TargetPageDataNode;
 
 static IntervalTreeRoot targetdata_root;
@@ -902,7 +902,8 @@
         n_last = MIN(last, n->last);
         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
 
-        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
+        memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
+               p_len * TARGET_PAGE_DATA_SIZE);
     }
 }
@@ -910,7 +911,7 @@
 {
     IntervalTreeNode *n;
     TargetPageDataNode *t;
-    target_ulong page, region;
+    target_ulong page, region, p_ofs;
 
     page = address & TARGET_PAGE_MASK;
     region = address & TBD_MASK;
@@ -926,7 +927,8 @@
         mmap_lock();
         n = interval_tree_iter_first(&targetdata_root, page, page);
         if (!n) {
-            t = g_new0(TargetPageDataNode, 1);
+            t = g_malloc0(sizeof(TargetPageDataNode)
+                          + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
             n = &t->itree;
             n->start = region;
             n->last = region | ~TBD_MASK;
@@ -936,7 +938,8 @@
     }
 
     t = container_of(n, TargetPageDataNode, itree);
-    return t->data[(page - region) >> TARGET_PAGE_BITS];
+    p_ofs = (page - region) >> TARGET_PAGE_BITS;
+    return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
 }
 #else
 void page_reset_target_data(target_ulong start, target_ulong last) { }
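
For reference, a small standalone check (not part of the patch, constants
invented) that the old two-dimensional indexing and the new explicit byte
arithmetic address the same storage:

#include <assert.h>
#include <stddef.h>

#define PAGES           16   /* stand-in for TPD_PAGES */
#define PAGE_DATA_SIZE  64   /* stand-in for TARGET_PAGE_DATA_SIZE */

struct fixed {
    char data[PAGES][PAGE_DATA_SIZE];
};

int main(void)
{
    struct fixed f;

    for (size_t i = 0; i < PAGES; i++) {
        /* Old style: row pointer from 2-D indexing. */
        char *row = f.data[i];
        /* New style: base of the flat array plus an explicit byte offset,
         * which is what the patched code computes against char data[]. */
        char *flat = (char *)f.data + i * PAGE_DATA_SIZE;
        assert(row == flat);
    }
    return 0;
}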