diff --git a/examples/eventfd.c b/examples/eventfd.c
index 3bea0d53..8e15bfca 100644
--- a/examples/eventfd.c
+++ b/examples/eventfd.c
@@ -88,7 +88,7 @@ int main(int argc, char **argv)
         .nsid = cpu_to_le32(nsid),
     };
 
-    nvme_rq_map_prp(rq, &cmd, iova, 0x1000);
+    nvme_rq_map_prp(&ctrl, rq, &cmd, iova, 0x1000);
 
     nvme_rq_exec(rq, &cmd);
 
diff --git a/examples/io.c b/examples/io.c
index 51cf9b60..f751ef8e 100644
--- a/examples/io.c
+++ b/examples/io.c
@@ -99,7 +99,7 @@ int main(int argc, char **argv)
         .nsid = cpu_to_le32(nsid),
     };
 
-    ret = nvme_rq_map_prp(rq, &cmd, iova, 0x1000);
+    ret = nvme_rq_map_prp(&ctrl, rq, &cmd, iova, 0x1000);
     if (ret)
         err(1, "could not map prps");
 
diff --git a/include/vfn/nvme/ctrl.h b/include/vfn/nvme/ctrl.h
index 74361816..28bee9a5 100644
--- a/include/vfn/nvme/ctrl.h
+++ b/include/vfn/nvme/ctrl.h
@@ -85,6 +85,7 @@ struct nvme_ctrl {
     struct {
         int nsqa, ncqa;
         int mqes;
+        int mps;
     } config;
 
     /* private: internal */
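The PRP helpers can no longer derive the page size from the host page size alone; they need the per-controller memory page size now cached in ctrl->config.mps, so the controller handle is threaded through the call sites above and the public prototypes below. A minimal sketch of an updated caller, mirroring examples/io.c (the request tracker and iova are assumed to come from the usual libvfn setup; this is an illustration, not part of the patch):

    #include <err.h>
    #include <stdint.h>

    #include <vfn/nvme.h>

    static void submit(struct nvme_ctrl *ctrl, struct nvme_rq *rq,
                       uint64_t iova, uint32_t nsid)
    {
            union nvme_cmd cmd = {
                    /* opcode and remaining fields set as in the examples */
                    .nsid = cpu_to_le32(nsid),
            };

            /* the helper now reads ctrl->config.mps to size the prps */
            if (nvme_rq_map_prp(ctrl, rq, &cmd, iova, 0x1000))
                    err(1, "could not map prps");

            nvme_rq_exec(rq, &cmd);
    }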
diff --git a/include/vfn/nvme/rq.h b/include/vfn/nvme/rq.h
index cdd7667e..ee90cb51 100644
--- a/include/vfn/nvme/rq.h
+++ b/include/vfn/nvme/rq.h
@@ -204,6 +204,7 @@ static inline void nvme_rq_exec(struct nvme_rq *rq, union nvme_cmd *cmd)
  * nvme_rq_map_prp - Set up the Physical Region Pages in the data pointer of the
  *                   command from a buffer that is contiguous in iova mapped
  *                   memory.
+ * @ctrl: &struct nvme_ctrl
  * @rq: Request tracker (&struct nvme_rq)
  * @cmd: NVMe command prototype (&union nvme_cmd)
  * @iova: I/O Virtual Address
@@ -213,11 +214,13 @@ static inline void nvme_rq_exec(struct nvme_rq *rq, union nvme_cmd *cmd)
  *
  * Return: ``0`` on success, ``-1`` on error and sets errno.
  */
-int nvme_rq_map_prp(struct nvme_rq *rq, union nvme_cmd *cmd, uint64_t iova, size_t len);
+int nvme_rq_map_prp(struct nvme_ctrl *ctrl, struct nvme_rq *rq, union nvme_cmd *cmd, uint64_t iova,
+                    size_t len);
 
 /**
  * nvme_rq_mapv_prp - Set up the Physical Region Pages in the data pointer of
  *                    the command from an iovec.
+ * @ctrl: &struct nvme_ctrl
  * @rq: Request tracker (&struct nvme_rq)
  * @cmd: NVMe command prototype (&union nvme_cmd)
  * @iov: array of iovecs
@@ -229,7 +232,8 @@ int nvme_rq_map_prp(struct nvme_rq *rq, union nvme_cmd *cmd, uint64_t iova, size
  *
  * Return: ``0`` on success, ``-1`` on error and sets errno.
  */
-int nvme_rq_mapv_prp(struct nvme_rq *rq, union nvme_cmd *cmd, struct iovec *iov, int niov);
+int nvme_rq_mapv_prp(struct nvme_ctrl *ctrl, struct nvme_rq *rq, union nvme_cmd *cmd,
+                     struct iovec *iov, int niov);
 
 /**
  * nvme_rq_spin - Spin for completion of the command associated with the request
diff --git a/include/vfn/nvme/util.h b/include/vfn/nvme/util.h
index 85d08396..16b97b88 100644
--- a/include/vfn/nvme/util.h
+++ b/include/vfn/nvme/util.h
@@ -18,6 +18,9 @@
 
 #define NVME_CID_AER (1 << 15)
 
+#define __mps_to_pageshift(mps) (12 + mps)
+#define __mps_to_pagesize(mps) (1ULL << __mps_to_pageshift(mps))
+
 /**
  * nvme_crc64 - calculate NVMe CRC64
  * @crc: starting value
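The two helpers above encode the NVMe convention that the controller memory page size is 2^(12 + MPS): MPS 0 is 4 KiB, MPS 1 is 8 KiB, and so on. A standalone sanity check of the arithmetic (restating the macros so it compiles on its own; not part of the patch):

    #include <assert.h>

    #define __mps_to_pageshift(mps) (12 + (mps))
    #define __mps_to_pagesize(mps) (1ULL << __mps_to_pageshift(mps))

    int main(void)
    {
            assert(__mps_to_pagesize(0) == 0x1000);  /* 4 KiB */
            assert(__mps_to_pagesize(1) == 0x2000);  /* 8 KiB */
            assert(__mps_to_pagesize(4) == 0x10000); /* 64 KiB */

            return 0;
    }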
diff --git a/src/nvme/core.c b/src/nvme/core.c
index c41a1918..3a1b3959 100644
--- a/src/nvme/core.c
+++ b/src/nvme/core.c
@@ -111,9 +111,7 @@ static void nvme_discard_cq(struct nvme_ctrl *ctrl, struct nvme_cq *cq)
     if (!cq->vaddr)
         return;
 
-    len = ALIGN_UP((size_t)cq->qsize << NVME_CQES, __VFN_PAGESIZE);
-
-    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), cq->vaddr, NULL))
+    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), cq->vaddr, &len))
         log_debug("failed to unmap vaddr\n");
 
     pgunmap(cq->vaddr, len);
@@ -166,7 +164,11 @@ static int nvme_configure_sq(struct nvme_ctrl *ctrl, int qid, int qsize,
         sq->dbbuf.eventidx = sqtdbl(ctrl->dbbuf.eventidxs, qid, dstrd);
     }
 
-    len = pgmapn(&sq->pages.vaddr, qsize, __VFN_PAGESIZE);
+    /*
+     * Use ctrl->config.mps instead of host page size, as we have the
+     * opportunity to pack the allocations.
+     */
+    len = pgmapn(&sq->pages.vaddr, qsize, __mps_to_pagesize(ctrl->config.mps));
     if (len < 0)
         return -1;
 
@@ -185,8 +187,8 @@ static int nvme_configure_sq(struct nvme_ctrl *ctrl, int qid, int qsize,
         rq->sq = sq;
         rq->cid = (uint16_t)i;
 
-        rq->page.vaddr = sq->pages.vaddr + (i << __VFN_PAGESHIFT);
-        rq->page.iova = sq->pages.iova + (i << __VFN_PAGESHIFT);
+        rq->page.vaddr = sq->pages.vaddr + ((uint64_t)i << (12 + ctrl->config.mps));
+        rq->page.iova = sq->pages.iova + ((uint64_t)i << (12 + ctrl->config.mps));
 
         if (i > 0)
             rq->rq_next = &sq->rqs[i - 1];
@@ -208,10 +210,10 @@ static int nvme_configure_sq(struct nvme_ctrl *ctrl, int qid, int qsize,
 free_sq_rqs:
     free(sq->rqs);
 unmap_pages:
-    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), sq->pages.vaddr, NULL))
+    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), sq->pages.vaddr, (size_t *)&len))
         log_debug("failed to unmap vaddr\n");
 
-    pgunmap(sq->pages.vaddr, (size_t)sq->qsize << __VFN_PAGESHIFT);
+    pgunmap(sq->pages.vaddr, len);
 
     return -1;
 }
@@ -223,18 +225,14 @@ static void nvme_discard_sq(struct nvme_ctrl *ctrl, struct nvme_sq *sq)
     if (!sq->vaddr)
         return;
 
-    len = ALIGN_UP((size_t)sq->qsize << NVME_SQES, __VFN_PAGESIZE);
-
-    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), sq->vaddr, NULL))
+    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), sq->vaddr, &len))
         log_debug("failed to unmap vaddr\n");
 
     pgunmap(sq->vaddr, len);
 
     free(sq->rqs);
 
-    len = (size_t)sq->qsize << __VFN_PAGESHIFT;
-
-    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), sq->pages.vaddr, NULL))
+    if (iommu_unmap_vaddr(__iommu_ctx(ctrl), sq->pages.vaddr, &len))
         log_debug("failed to unmap vaddr\n");
 
     pgunmap(sq->pages.vaddr, len);
@@ -432,12 +430,12 @@ int nvme_enable(struct nvme_ctrl *ctrl)
     css = NVME_FIELD_GET(cap, CAP_CSS);
 
     cc =
-        NVME_FIELD_SET(__VFN_PAGESHIFT - 12, CC_MPS) |
+        NVME_FIELD_SET(ctrl->config.mps, CC_MPS) |
         NVME_FIELD_SET(NVME_CC_AMS_RR, CC_AMS) |
         NVME_FIELD_SET(NVME_CC_SHN_NONE, CC_SHN) |
         NVME_FIELD_SET(NVME_SQES, CC_IOSQES) |
         NVME_FIELD_SET(NVME_CQES, CC_IOCQES) |
         NVME_FIELD_SET(0x1, CC_EN);
 
     if (css & NVME_CAP_CSS_CSI)
         cc |= NVME_FIELD_SET(NVME_CC_CSS_CSI, CC_CSS);
@@ -508,7 +506,7 @@ int nvme_init(struct nvme_ctrl *ctrl, const char *bdf, const struct nvme_ctrl_op
 {
     unsigned long long classcode;
     uint64_t cap;
-    uint8_t mpsmin;
+    uint8_t mpsmin, mpsmax;
     uint16_t oacs;
     ssize_t len;
     void *vaddr;
@@ -549,11 +547,17 @@ int nvme_init(struct nvme_ctrl *ctrl, const char *bdf, const struct nvme_ctrl_op
     cap = le64_to_cpu(mmio_read64(ctrl->regs + NVME_REG_CAP));
 
     mpsmin = NVME_FIELD_GET(cap, CAP_MPSMIN);
+    mpsmax = NVME_FIELD_GET(cap, CAP_MPSMAX);
+
+    ctrl->config.mps = clamp_t(int, __VFN_PAGESHIFT - 12, mpsmin, mpsmax);
 
-    if ((12 + mpsmin) > __VFN_PAGESHIFT) {
-        log_debug("controller minimum page size too large\n");
+    if ((12 + ctrl->config.mps) > __VFN_PAGESHIFT) {
+        log_error("mpsmin too large\n");
         errno = EINVAL;
         return -1;
+    } else if ((12 + ctrl->config.mps) < __VFN_PAGESHIFT) {
+        log_info("host memory page size is larger than mpsmax; clamping mps to %d\n",
+                 ctrl->config.mps);
     }
 
     ctrl->config.mqes = NVME_FIELD_GET(cap, CAP_MQES);
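CAP.MPSMIN and CAP.MPSMAX bound the page sizes a controller supports, so nvme_init now clamps the host page shift into that range instead of requiring an exact fit, and only fails when even MPSMIN exceeds the host page size. A standalone sketch of that decision, with clamp_t open-coded and a 64 KiB host page assumed purely for illustration:

    #include <errno.h>
    #include <stdio.h>

    #define HOST_PAGESHIFT 16 /* assume a 64 KiB page host */

    static int pick_mps(int mpsmin, int mpsmax)
    {
            int mps = HOST_PAGESHIFT - 12;

            /* clamp into the controller-supported range */
            if (mps < mpsmin)
                    mps = mpsmin;
            if (mps > mpsmax)
                    mps = mpsmax;

            if (12 + mps > HOST_PAGESHIFT) {
                    errno = EINVAL; /* even mpsmin exceeds the host page size */
                    return -1;
            }

            return mps;
    }

    int main(void)
    {
            /* controller supports 4 KiB..16 KiB: clamped down to mps 2 */
            printf("mps = %d\n", pick_mps(0, 2));

            return 0;
    }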
diff --git a/src/nvme/rq.c b/src/nvme/rq.c
index cfd93d3a..332ba4e0 100644
--- a/src/nvme/rq.c
+++ b/src/nvme/rq.c
@@ -39,34 +39,29 @@
 
 #include "iommu/context.h"
 
-static int __rq_max_prps;
-
-static void __attribute__((constructor)) init_max_prps(void)
+static inline int __map_first(leint64_t *prp1, leint64_t *prplist, uint64_t iova, size_t len,
+                              int pageshift)
 {
-    __rq_max_prps = (int)(sysconf(_SC_PAGESIZE) / sizeof(uint64_t) + 1);
-
-    log_debug("max prps is %d\n", __rq_max_prps);
-}
+    size_t pagesize = 1 << pageshift;
+    int max_prps = 1 << (pageshift - 3);
 
-static inline int __map_first(leint64_t *prp1, leint64_t *prplist, uint64_t iova, size_t len)
-{
     /* number of prps required to map the buffer */
     int prpcount = 1;
 
     *prp1 = cpu_to_le64(iova);
 
     /* account for what is covered with the first prp */
-    len -= min_t(size_t, len, __VFN_PAGESIZE - (iova & (__VFN_PAGESIZE - 1)));
+    len -= min_t(size_t, len, pagesize - (iova & (pagesize - 1)));
 
     /* any residual just adds more prps */
     if (len)
-        prpcount += (int)ALIGN_UP(len, __VFN_PAGESIZE) >> __VFN_PAGESHIFT;
+        prpcount += (int)ALIGN_UP(len, pagesize) >> pageshift;
 
-    if (prpcount > 1 && !ALIGNED(iova, __VFN_PAGESIZE))
+    if (prpcount > 1 && !ALIGNED(iova, pagesize))
         /* align down to simplify loop below */
-        iova = ALIGN_DOWN(iova, __VFN_PAGESIZE);
+        iova = ALIGN_DOWN(iova, pagesize);
 
-    if (prpcount > __rq_max_prps) {
+    if (prpcount > max_prps) {
         errno = EINVAL;
         return 0;
     }
@@ -76,7 +71,7 @@ static inline int __map_first(leint64_t *prp1, leint64_t *prplist, uint64_t iova
      * aligned from the above, which simplifies this.
      */
     for (int i = 1; i < prpcount; i++)
-        prplist[i - 1] = cpu_to_le64(iova + (i << __VFN_PAGESHIFT));
+        prplist[i - 1] = cpu_to_le64(iova + ((uint64_t)i << pageshift));
 
     /*
      * prpcount may be zero if the buffer length was less than the page
@@ -85,26 +80,30 @@ static inline int __map_first(leint64_t *prp1, leint64_t *prplist, uint64_t iova
     return clamp_t(int, prpcount, 1, prpcount);
 }
 
-static inline int __map_aligned(leint64_t *prplist, int prpcount, uint64_t iova)
+static inline int __map_aligned(leint64_t *prplist, int prpcount, uint64_t iova, int pageshift)
 {
+    size_t pagesize = 1 << pageshift;
+
     /*
      * __map_aligned is used exclusively for mapping into the prplist
      * entries where addresses must be page size aligned.
      */
-    assert(ALIGNED(iova, __VFN_PAGESIZE));
+    assert(ALIGNED(iova, pagesize));
 
     for (int i = 0; i < prpcount; i++)
-        prplist[i] = cpu_to_le64(iova + (i << __VFN_PAGESHIFT));
+        prplist[i] = cpu_to_le64(iova + ((uint64_t)i << pageshift));
 
     return prpcount;
 }
 
-int nvme_rq_map_prp(struct nvme_rq *rq, union nvme_cmd *cmd, uint64_t iova, size_t len)
+int nvme_rq_map_prp(struct nvme_ctrl *ctrl, struct nvme_rq *rq, union nvme_cmd *cmd, uint64_t iova,
+                    size_t len)
 {
     int prpcount;
     leint64_t *prplist = rq->page.vaddr;
 
-    prpcount = __map_first(&cmd->dptr.prp1, prplist, iova, len);
+    prpcount = __map_first(&cmd->dptr.prp1, prplist, iova, len,
+                           __mps_to_pageshift(ctrl->config.mps));
     if (!prpcount) {
         errno = EINVAL;
         return -1;
@@ -120,15 +119,19 @@ int nvme_rq_map_prp(struct nvme_rq *rq, union nvme_cmd *cmd, uint64_t iova, size
     return 0;
 }
 
-int nvme_rq_mapv_prp(struct nvme_rq *rq, union nvme_cmd *cmd, struct iovec *iov, int niov)
+int nvme_rq_mapv_prp(struct nvme_ctrl *ctrl, struct nvme_rq *rq, union nvme_cmd *cmd,
+                     struct iovec *iov, int niov)
 {
     int prpcount, _prpcount;
     leint64_t *prplist = rq->page.vaddr;
     uint64_t iova = (uint64_t)iov->iov_base;
     size_t len = iov->iov_len;
+    int pageshift = __mps_to_pageshift(ctrl->config.mps);
+    size_t pagesize = 1 << pageshift;
+    int max_prps = 1 << (pageshift - 3);
 
     /* map the first segment */
-    prpcount = __map_first(&cmd->dptr.prp1, prplist, iova, len);
+    prpcount = __map_first(&cmd->dptr.prp1, prplist, iova, len, pageshift);
 
     /*
      * At this point, one of three conditions must hold:
@@ -140,7 +143,7 @@ int nvme_rq_mapv_prp(struct nvme_rq *rq, union nvme_cmd *cmd, struct iovec *iov,
      * If none holds, the buffer(s) within the iovec cannot be mapped given
      * the PRP alignment requirements.
      */
-    if (!(prpcount == 1 || niov == 1 || ALIGNED(iova + len, __VFN_PAGESIZE))) {
+    if (!(prpcount == 1 || niov == 1 || ALIGNED(iova + len, pagesize))) {
         log_error("iov[0].iov_base/len invalid\n");
 
         goto invalid;
@@ -151,29 +154,29 @@ int nvme_rq_mapv_prp(struct nvme_rq *rq, union nvme_cmd *cmd, struct iovec *iov,
         iova = (uint64_t)iov[i].iov_base;
         len = iov[i].iov_len;
 
-        _prpcount = max_t(int, 1, (int)len >> __VFN_PAGESHIFT);
+        _prpcount = max_t(int, 1, (int)len >> pageshift);
 
-        if (prpcount + _prpcount > __rq_max_prps) {
+        if (prpcount + _prpcount > max_prps) {
             log_error("too many prps required\n");
 
             goto invalid;
         }
 
-        if (!ALIGNED(iova, __VFN_PAGESIZE)) {
+        if (!ALIGNED(iova, pagesize)) {
             log_error("unaligned iov[%u].iov_base (0x%"PRIx64")\n", i, iova);
 
             goto invalid;
         }
 
         /* all entries but the last must have a page size aligned len */
-        if (i < niov - 1 && !ALIGNED(len, __VFN_PAGESIZE)) {
+        if (i < niov - 1 && !ALIGNED(len, pagesize)) {
             log_error("unaligned iov[%u].len (%zu)\n", i, len);
 
             goto invalid;
         }
 
-        prpcount += __map_aligned(&prplist[prpcount - 1], _prpcount, iova);
+        prpcount += __map_aligned(&prplist[prpcount - 1], _prpcount, iova, pageshift);
     }
 
     if (prpcount == 2)
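With the load-time constructor gone, the PRP budget is computed from the configured page size instead: a request tracker owns one page for its PRP list, and with one 8-byte entry per PRP that page holds 1 << (pageshift - 3) entries, i.e. 512 for 4 KiB pages and 1024 for 8 KiB. The removed constructor derived the same bound from the host page size (plus one for the prp1 entry). A small arithmetic sketch of the new formula:

    #include <stdio.h>

    int main(void)
    {
            for (int mps = 0; mps <= 4; mps++) {
                    int pageshift = 12 + mps;

                    /* one prp entry is a 64-bit address (8 bytes) */
                    int max_prps = 1 << (pageshift - 3);

                    printf("mps %d: pagesize %u, prplist entries %d\n",
                           mps, 1u << pageshift, max_prps);
            }

            return 0;
    }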
diff --git a/src/nvme/rq_test.c b/src/nvme/rq_test.c
index ef94836a..e679b058 100644
--- a/src/nvme/rq_test.c
+++ b/src/nvme/rq_test.c
@@ -20,8 +20,14 @@
 
 #include "rq.c"
 
+#define __max_prps 513
+
 int main(void)
 {
+    struct nvme_ctrl ctrl = {
+        .config.mps = 0,
+    };
+
     struct nvme_rq rq;
     union nvme_cmd cmd;
     leint64_t *prplist;
@@ -36,14 +42,14 @@ int main(void)
 
     /* test 512b aligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000000, 0x200);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x200);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
 
     /* test 4k aligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000000, 0x1000);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x1000);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
@@ -57,7 +63,7 @@ int main(void)
 
     /* test 8k aligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000000, 0x2000);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x2000);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -73,7 +79,7 @@ int main(void)
 
     /* test 12k aligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000000, 0x3000);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x3000);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -91,7 +97,7 @@ int main(void)
 
     /* test 512b unaligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000004, 0x200);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x200);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
@@ -105,7 +111,7 @@ int main(void)
 
     /* test 4k unaligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000004, 0x1000);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1000);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -128,14 +134,14 @@ int main(void)
 
     /* test 4k - 4 unaligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000004, 0x1000 - 4);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1000 - 4);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
 
     /* test 8k unaligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000004, 0x2000);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x2000);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -153,14 +159,14 @@ int main(void)
 
     /* test 8k - 4 unaligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000004, 0x2000 - 4);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x2000 - 4);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
 
     /* test 12k unaligned */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    nvme_rq_map_prp(&rq, &cmd, 0x1000004, 0x3000);
+    nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x3000);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -181,7 +187,7 @@ int main(void)
     /* test 512b aligned 1-iovec */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x200};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 1);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 1);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
@@ -189,7 +195,7 @@ int main(void)
     /* test 4k aligned 1-iovec */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x1000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 1);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 1);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
@@ -197,7 +203,7 @@ int main(void)
     /* test 8k aligned 1-iovec */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x2000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 1);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 1);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -205,7 +211,7 @@ int main(void)
     /* test 12k aligned 1-iovec */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x3000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 1);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 1);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -216,7 +222,7 @@ int main(void)
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x1000};
     iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = 0x1000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 2);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 2);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -226,7 +232,7 @@ int main(void)
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x1000};
     iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = 0x1000};
     iov[2] = (struct iovec) {.iov_base = (void *)0x1002000, .iov_len = 0x1000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 3);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 3);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -237,7 +243,7 @@ int main(void)
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x1000};
     iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = 0x2000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 3);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 3);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -247,7 +253,7 @@ int main(void)
     /* test 512b unaligned 1-iovec */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000004, .iov_len = 0x200};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 1);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 1);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
@@ -255,7 +261,7 @@ int main(void)
     /* test 4k unaligned 1-iovec */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000004, .iov_len = 0x1000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 1);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 1);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -264,7 +270,7 @@ int main(void)
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000004, .iov_len = 0x1000 - 4};
     iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = 0x1000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 2);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 2);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -274,7 +280,7 @@ int main(void)
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000004, .iov_len = 0x1000 - 4};
     iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = 0x1000};
     iov[2] = (struct iovec) {.iov_base = (void *)0x1002000, .iov_len = 0x1000};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 3);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 3);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
@@ -285,7 +291,7 @@ int main(void)
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x1000};
     iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = 0x1000 - 4};
-    nvme_rq_mapv_prp(&rq, &cmd, iov, 2);
+    nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 2);
 
     ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
     ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
@@ -296,17 +302,17 @@ int main(void)
      */
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
-    ok1(nvme_rq_map_prp(&rq, &cmd, 0x1000000, (__rq_max_prps + 1) * 0x1000) == -1);
+    ok1(nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, (__max_prps + 1) * 0x1000) == -1);
 
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000004, .iov_len = 0x1000};
     iov[0] = (struct iovec) {.iov_base = (void *)0x1001004, .iov_len = 0x1000};
-    ok1(nvme_rq_mapv_prp(&rq, &cmd, iov, 2) == -1);
+    ok1(nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 2) == -1);
 
     memset((void *)prplist, 0x0, __VFN_PAGESIZE);
     iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x1000};
-    iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = __rq_max_prps * 0x1000};
-    ok1(nvme_rq_mapv_prp(&rq, &cmd, iov, 2) == -1);
+    iov[1] = (struct iovec) {.iov_base = (void *)0x1001000, .iov_len = __max_prps * 0x1000};
+    ok1(nvme_rq_mapv_prp(&ctrl, &rq, &cmd, iov, 2) == -1);
 
     return exit_status();
 }
diff --git a/src/nvme/types.h b/src/nvme/types.h
index c8e775ad..5afd0984 100644
--- a/src/nvme/types.h
+++ b/src/nvme/types.h
@@ -40,6 +40,8 @@ enum nvme_cap {
     NVME_CAP_CSS_MASK = 0xff,
     NVME_CAP_MPSMIN_SHIFT = 48,
     NVME_CAP_MPSMIN_MASK = 0xf,
+    NVME_CAP_MPSMAX_SHIFT = 52,
+    NVME_CAP_MPSMAX_MASK = 0xf,
 
     NVME_CAP_CSS_CSI = 1 << 6,
     NVME_CAP_CSS_ADMIN = 1 << 7,
diff --git a/src/nvme/util.c b/src/nvme/util.c
index 5c9f79d3..2c9c81ee 100644
--- a/src/nvme/util.c
+++ b/src/nvme/util.c
@@ -92,7 +92,7 @@ int nvme_sync(struct nvme_ctrl *ctrl, struct nvme_sq *sq, void *sqe, void *buf,
         return -1;
 
     if (buf) {
-        ret = nvme_rq_map_prp(rq, sqe, iova, len);
+        ret = nvme_rq_map_prp(ctrl, rq, sqe, iova, len);
         if (ret) {
             goto release_rq;
         }
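The test pins config.mps to 0 so every page calculation stays at 4 KiB and the pre-existing expected PRP values remain valid; __max_prps 513 mirrors the bound the removed constructor would have produced on a 4 KiB host. A hypothetical follow-up case for a larger page size (not part of the patch, and the expected values are extrapolated from the 4 KiB cases above): with MPS 1 the page size is 8 KiB, so a 12 KiB buffer needs only two PRPs and prp2 carries the second page address directly rather than a prplist pointer.

    struct nvme_ctrl ctrl8k = { .config.mps = 1 };

    /* 12k buffer on an 8 KiB aligned iova: two prps, no prplist needed */
    memset((void *)prplist, 0x0, __VFN_PAGESIZE);
    nvme_rq_map_prp(&ctrl8k, &rq, &cmd, 0x1000000, 0x3000);

    ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
    ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1002000);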