On some Zhaoxin platforms, the xHCI controller prefetches TRBs for
performance. This prefetch can cross a page boundary and access memory
that does not belong to xHCI. To fix this issue, allocate two pages per
TRB segment and use only the first page.
The patch is scheduled to be submitted to the kernel mainline in 2021.
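
For context, here is a minimal user-space sketch of the sizing rule the
second hunk applies. The 4 KiB TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)
and PCI_VENDOR_ID_ZHAOXIN (0x1d17, from include/linux/pci_ids.h) match
the kernel; struct pci_id and the two helper functions are illustrative
stand-ins, not kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PCI_VENDOR_ID_ZHAOXIN	0x1d17	/* include/linux/pci_ids.h */
#define TRB_SEGMENT_SIZE	4096	/* one 4 KiB TRB ring segment */

struct pci_id { unsigned short vendor, device; };

/* Affected controllers may prefetch TRBs past the end of a segment,
 * so their ring segments are allocated at twice the normal size and
 * alignment; only the first half is ever handed to the ring code. */
static bool needs_trb_prefetch_quirk(const struct pci_id *id)
{
	return id->vendor == PCI_VENDOR_ID_ZHAOXIN &&
	       (id->device == 0x9202 || id->device == 0x9203);
}

static size_t trb_segment_alloc_size(const struct pci_id *id)
{
	return needs_trb_prefetch_quirk(id) ? 2 * TRB_SEGMENT_SIZE
					    : TRB_SEGMENT_SIZE;
}

int main(void)
{
	struct pci_id quirky = { PCI_VENDOR_ID_ZHAOXIN, 0x9202 };
	struct pci_id other  = { 0x8086, 0x0001 };	/* hypothetical */

	printf("quirky controller: %zu-byte segments\n",
	       trb_segment_alloc_size(&quirky));
	printf("other controller:  %zu-byte segments\n",
	       trb_segment_alloc_size(&other));
	return 0;
}

Doubling the pool's element size, alignment, and boundary means a
prefetch that runs past the end of the half actually used by the ring
code still lands in memory the driver owns.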
Signed-off-by: LeoLiu-oc <LeoLiu-oc@zhaoxin.com>
---
 drivers/usb/host/xhci-mem.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 82ce6d8b708d..9f4e1ee82898 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2364,6 +2364,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
+	struct pci_dev	*pdev = to_pci_dev(dev);
 	unsigned int	val, val2;
 	u64		val_64;
 	u32		page_size, temp;
@@ -2429,8 +2430,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * and our use of dma addresses in the trb_address_map radix tree needs
 	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
 	 */
-	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
-			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+	/* With xHCI TRB prefetch patch: to fix cross page boundary access issue in IOV environment */
+	if ((pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && (pdev->device == 0x9202 || pdev->device == 0x9203)) {
+		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+			TRB_SEGMENT_SIZE*2, TRB_SEGMENT_SIZE*2, xhci->page_size*2);
+	} else
+		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
 
 	/* See Table 46 and Note on Figure 55 */
 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
--
2.20.1