Skip to content

Commit

Permalink
nvme/rq: fix prp mapping
Browse files Browse the repository at this point in the history
Mapping PRPs is apparently not that easy.

Prior to this patch, the logic was a little too clever. Dumb it down a
bit and fix a bunch of edge cases. And add a bunch of tests for these
cases.

Reported-by: Karl Bonde Torp <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]>
  • Loading branch information
birkelund committed Feb 2, 2024
1 parent 9f6ebfc commit 4a6316e
Show file tree
Hide file tree
Showing 2 changed files with 78 additions and 11 deletions.
20 changes: 10 additions & 10 deletions src/nvme/rq.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,20 +46,20 @@ static inline int __map_first(leint64_t *prp1, leint64_t *prplist, uint64_t iova
int max_prps = 1 << (pageshift - 3);

/* number of prps required to map the buffer */
int prpcount = (int)(len >> pageshift);
int prpcount = 1;

*prp1 = cpu_to_le64(iova);

/*
* If prpcount is at least one and the iova is not aligned to the page
* size, we need one more prp than what we calculated above.
* Additionally, we align the iova down to a page size boundary,
* simplifying the following loop.
*/
if (prpcount && !ALIGNED(iova, pagesize)) {
/* account for what is covered with the first prp */
len -= min_t(size_t, len, pagesize - (iova & (pagesize - 1)));

/* any residual just adds more prps */
if (len)
prpcount += (int)ALIGN_UP(len, pagesize) >> pageshift;

if (prpcount > 1 && !ALIGNED(iova, pagesize))
/* align down to simplify loop below */
iova = ALIGN_DOWN(iova, pagesize);
prpcount++;
}

if (prpcount > max_prps) {
errno = EINVAL;
Expand Down
69 changes: 68 additions & 1 deletion src/nvme/rq_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ int main(void)
leint64_t *prplist;
struct iovec iov[8];

plan_tests(62);
plan_tests(89);

assert(pgmap((void **)&prplist, __VFN_PAGESIZE) > 0);

Expand All @@ -54,13 +54,29 @@ int main(void)
ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);

/* test 4k + 8 aligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x1008);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);

/* test 8k aligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x2000);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);

/* test 8k + 16 aligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x2010);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);

/* test 12k aligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x3000);
Expand All @@ -70,20 +86,52 @@ int main(void)
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);

/* test 12k + 24 aligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x3018);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);

/* test 512b unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x200);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);

/* test 512 unaligned (nasty) */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1001000 - 4, 0x200);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1001000 - 4);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);

/* test 4k unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1000);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);

/* test 4k + 8 unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1008);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);

/* test 4k + 8 unaligned (nasty) */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1001000 - 4, 0x1008);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1001000 - 4);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);

/* test 4k - 4 unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1000 - 4);
Expand All @@ -100,6 +148,15 @@ int main(void)
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);

/* test 8k + 16 unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x2010);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);

/* test 8k - 4 unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x2000 - 4);
Expand All @@ -117,6 +174,16 @@ int main(void)
ok1(le64_to_cpu(prplist[1]) == 0x1002000);
ok1(le64_to_cpu(prplist[2]) == 0x1003000);

/* test 12k + 24 unaligned */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x3018);

ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
ok1(le64_to_cpu(prplist[0]) == 0x1001000);
ok1(le64_to_cpu(prplist[1]) == 0x1002000);
ok1(le64_to_cpu(prplist[2]) == 0x1003000);

/* test 512b aligned 1-iovec */
memset((void *)prplist, 0x0, __VFN_PAGESIZE);
iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x200};
Expand Down

0 comments on commit 4a6316e

Please sign in to comment.