IO_TLB_SHIFT   55  lib/swiotlb.c  #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
IO_TLB_SHIFT   62  lib/swiotlb.c  #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
IO_TLB_SHIFT  139  lib/swiotlb.c  io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
IO_TLB_SHIFT  143  lib/swiotlb.c  bytes = io_tlb_nslabs << IO_TLB_SHIFT;
IO_TLB_SHIFT  193  lib/swiotlb.c  io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
IO_TLB_SHIFT  200  lib/swiotlb.c  order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
IO_TLB_SHIFT  202  lib/swiotlb.c  bytes = io_tlb_nslabs << IO_TLB_SHIFT;
IO_TLB_SHIFT  219  lib/swiotlb.c  bytes = io_tlb_nslabs << IO_TLB_SHIFT;
IO_TLB_SHIFT  305  lib/swiotlb.c  offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
IO_TLB_SHIFT  307  lib/swiotlb.c  ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
IO_TLB_SHIFT  308  lib/swiotlb.c  : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
IO_TLB_SHIFT  314  lib/swiotlb.c  nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
IO_TLB_SHIFT  316  lib/swiotlb.c  stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
IO_TLB_SHIFT  354  lib/swiotlb.c  dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
IO_TLB_SHIFT  382  lib/swiotlb.c  io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
IO_TLB_SHIFT  396  lib/swiotlb.c  int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
IO_TLB_SHIFT  397  lib/swiotlb.c  int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
IO_TLB_SHIFT  440  lib/swiotlb.c  int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
IO_TLB_SHIFT  443  lib/swiotlb.c  buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
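
The occurrences above all perform the same family of byte/slab conversions against the swiotlb bounce-buffer pool: pool size to slab count, slab index to DMA address, mapping size to slot count, and address to in-slab offset. The sketch below is a minimal, standalone illustration of those conversions, not kernel code: it assumes IO_TLB_SHIFT = 11 (2 KiB slabs) and PAGE_SHIFT = 12 (4 KiB pages), which are the usual values in lib/swiotlb.c on x86, and re-implements ALIGN() locally so it compiles on its own.

/*
 * Standalone sketch of the swiotlb size/slab arithmetic shown in the
 * cross-reference above. Assumed values: IO_TLB_SHIFT = 11,
 * PAGE_SHIFT = 12; ALIGN() is a local stand-in for the kernel macro.
 */
#include <stdio.h>

#define IO_TLB_SHIFT   11                      /* assumed: 2 KiB slabs   */
#define PAGE_SHIFT     12                      /* assumed: 4 KiB pages   */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define ALIGN(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long default_size = 64UL << 20;   /* 64 MiB bounce pool  */
	unsigned long io_tlb_nslabs, bytes, nslots, offset;
	unsigned long size = 5000;                 /* sample mapping size */
	unsigned long dma_addr = 0x12345;          /* sample address      */

	/* pool size in slabs, and back to bytes (lines 139, 143) */
	io_tlb_nslabs = default_size >> IO_TLB_SHIFT;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* number of slots a mapping of 'size' bytes occupies (line 314) */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/* offset of an address within its slab (line 443) */
	offset = dma_addr & ((1 << IO_TLB_SHIFT) - 1);

	printf("slabs per page: %d\n", SLABS_PER_PAGE);
	printf("pool: %lu slabs, %lu bytes\n", io_tlb_nslabs, bytes);
	printf("%lu-byte mapping needs %lu slots\n", size, nslots);
	printf("offset within slab: %lu\n", offset);
	return 0;
}

With the assumed shift values, a 5000-byte mapping rounds up to 3 slots of 2 KiB, and each 4 KiB page holds 2 slabs; the same shift-and-mask pattern underlies every line in the listing.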