i386: Always bounce DMA requests above 4G for !PAE kernels
i386 kernels without 'options PAE' will still use PAE page tables if
the CPU supports PAE, both to support larger amounts of RAM and to
provide PG_NX permissions.  However, to avoid changing the API,
bus_addr_t and related constants (e.g. BUS_SPACE_MAXADDR) are still
limited to 32 bits.

To cope with this, the x86 bus_dma code included an extra check to
bounce requests for addresses above BUS_SPACE_MAXADDR.  This check was
elided in a recent refactoring, probably because it looks always-true
on its face and had no comment explaining its purpose.  To fix this,
restore a custom address-validation function that includes the check
on i386 kernels without 'options PAE'.

Reported by:	ci.freebsd.org
Reviewed by:	markj
Fixes:		3933ff5 busdma: tidy bus_dma_run_filter() functions
Differential Revision:	https://reviews.freebsd.org/D43277
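The core of the problem is a silent narrowing: on an i386 kernel without
'options PAE', a physical address (vm_paddr_t) can lie above 4G while
bus_addr_t is only 32 bits, so an address that is not bounced first would
be truncated on its way to the device. Below is a minimal, hypothetical
user-space sketch of that width mismatch; the typedefs and constant only
mimic the kernel's i386 !PAE definitions, and the must_bounce() here is a
simplified stand-in for the function added by this commit.

/*
 * Hypothetical sketch (not from the commit): it only illustrates why a
 * 64-bit vm_paddr_t must be checked against BUS_SPACE_MAXADDR before it
 * is narrowed to a 32-bit bus_addr_t on i386 kernels without PAE.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;	/* physical addresses can exceed 4G */
typedef uint32_t bus_addr_t;	/* bus addresses stay 32 bits (!PAE API) */

#define	BUS_SPACE_MAXADDR	0xffffffffu

/* Simplified stand-in for the kernel's must_bounce() check. */
static bool
must_bounce(vm_paddr_t paddr)
{
	return (paddr > BUS_SPACE_MAXADDR);
}

int
main(void)
{
	vm_paddr_t paddr = 0x123456789ULL;	/* ~4.5G, above the 4G line */
	bus_addr_t narrowed = (bus_addr_t)paddr;

	/* Without bouncing, the device would see the truncated address. */
	printf("paddr 0x%jx narrows to 0x%x, must_bounce=%d\n",
	    (uintmax_t)paddr, narrowed, must_bounce(paddr));
	return (0);
}

In the kernel, a request like this is instead bounced through a page below
4G rather than handing the truncated address to the device.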
bsdjhb committed Mar 13, 2024
2 parents a4e1f40 + 67b0b90 commit c3f0a82
Showing 1 changed file with 23 additions and 6 deletions.
29 changes: 23 additions & 6 deletions sys/x86/x86/busdma_bounce.c
@@ -117,6 +117,23 @@ static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
 
 #include "../../kern/subr_busdma_bounce.c"
 
+/*
+ * On i386 kernels without 'options PAE' we need to also bounce any
+ * physical addresses above 4G.
+ *
+ * NB: vm_paddr_t is required here since bus_addr_t is only 32 bits in
+ * i386 kernels without 'options PAE'.
+ */
+static __inline bool
+must_bounce(bus_dma_tag_t dmat, vm_paddr_t paddr)
+{
+#if defined(__i386__) && !defined(PAE)
+	if (paddr > BUS_SPACE_MAXADDR)
+		return (true);
+#endif
+	return (addr_needs_bounce(dmat, paddr));
+}
+
 static int
 bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
 {
@@ -491,7 +508,7 @@ _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
 	curaddr = buf;
 	while (buflen != 0) {
 		sgsize = MIN(buflen, dmat->common.maxsegsz);
-		if (addr_needs_bounce(dmat, curaddr)) {
+		if (must_bounce(dmat, curaddr)) {
 			sgsize = MIN(sgsize,
 			    PAGE_SIZE - (curaddr & PAGE_MASK));
 			if (pagesneeded == NULL)
@@ -547,7 +564,7 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
 				paddr = pmap_kextract(vaddr);
 			else
 				paddr = pmap_extract(pmap, vaddr);
-			if (addr_needs_bounce(dmat, paddr)) {
+			if (must_bounce(dmat, paddr)) {
 				sg_len = roundup2(sg_len,
 				    dmat->common.alignment);
 				map->pagesneeded++;
@@ -584,7 +601,7 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
 			sg_len = PAGE_SIZE - ma_offs;
 			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
 			sg_len = MIN(sg_len, max_sgsize);
-			if (addr_needs_bounce(dmat, paddr)) {
+			if (must_bounce(dmat, paddr)) {
 				sg_len = roundup2(sg_len,
 				    dmat->common.alignment);
 				sg_len = MIN(sg_len, max_sgsize);
@@ -685,7 +702,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
 		sgsize = MIN(buflen, dmat->common.maxsegsz);
 		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 		    map->pagesneeded != 0 &&
-		    addr_needs_bounce(dmat, curaddr)) {
+		    must_bounce(dmat, curaddr)) {
 			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
 			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
 			    sgsize);
@@ -753,7 +770,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
 		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 		    map->pagesneeded != 0 &&
-		    addr_needs_bounce(dmat, curaddr)) {
+		    must_bounce(dmat, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
 			sgsize = MIN(sgsize, max_sgsize);
 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
@@ -820,7 +837,7 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
 		sgsize = PAGE_SIZE - ma_offs;
 		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
 		    map->pagesneeded != 0 &&
-		    addr_needs_bounce(dmat, paddr)) {
+		    must_bounce(dmat, paddr)) {
 			sgsize = roundup2(sgsize, dmat->common.alignment);
 			sgsize = MIN(sgsize, max_sgsize);
 			KASSERT(vm_addr_align_ok(sgsize,
