f088025729
Some IOMMUs allocate memory areas spanning an LLD's segment boundary limit. This
forces low-level drivers to carry a workaround that adjusts the scatter lists
the IOMMU builds. We are in the process of making all the IOMMUs respect the
segment boundary limits so that such workarounds can be removed from LLDs.
SPARC64 IOMMUs were rewritten to use the IOMMU helper functions and the commit
89c94f2f70
made the IOMMUs not allocate memory
areas spanning the segment boundary limit.
However, SPARC64 IOMMUs allocate memory areas first then try to merge them
(while some IOMMUs walk through all the sg entries to see how they can be
merged first and allocate memory areas). So SPARC64 IOMMUs also need the
boundary limit checking when they try to merge sg entries.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
83 lines
2.1 KiB
C
/* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
 *
 * Copyright (C) 1999, 2008 David S. Miller (davem@davemloft.net)
 */
#ifndef _IOMMU_COMMON_H
|
|
#define _IOMMU_COMMON_H
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/types.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/scatterlist.h>
|
|
#include <linux/device.h>
|
|
#include <linux/iommu-helper.h>
|
|
|
|
#include <asm/iommu.h>
|
|
#include <asm/scatterlist.h>
|
|
|
|
/*
|
|
* These give mapping size of each iommu pte/tlb.
|
|
*/
|
|
#define IO_PAGE_SHIFT 13
|
|
#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT)
|
|
#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1))
|
|
#define IO_PAGE_ALIGN(addr) (((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK)
|
|
|
|
#define IO_TSB_ENTRIES (128*1024)
|
|
#define IO_TSB_SIZE (IO_TSB_ENTRIES * 8)
|
|
|
|
/*
|
|
* This is the hardwired shift in the iotlb tag/data parts.
|
|
*/
|
|
#define IOMMU_PAGE_SHIFT 13
|
|
|
|
#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
|
|
|
|
static inline unsigned long iommu_num_pages(unsigned long vaddr,
|
|
unsigned long slen)
|
|
{
|
|
unsigned long npages;
|
|
|
|
npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
|
|
npages >>= IO_PAGE_SHIFT;
|
|
|
|
return npages;
|
|
}
|
|
|
|
static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
|
|
{
|
|
unsigned long i, npages = 0;
|
|
struct scatterlist *sg;
|
|
|
|
for_each_sg(sglist, sg, nelems, i) {
|
|
unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
|
|
npages += iommu_num_pages(paddr, sg->length);
|
|
}
|
|
|
|
return npages;
|
|
}
|
|
|
|
static inline int is_span_boundary(unsigned long entry,
|
|
unsigned long shift,
|
|
unsigned long boundary_size,
|
|
struct scatterlist *outs,
|
|
struct scatterlist *sg)
|
|
{
|
|
unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
|
|
int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
|
|
|
|
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
|
|
}
|
|
|
|
extern unsigned long iommu_range_alloc(struct device *dev,
|
|
struct iommu *iommu,
|
|
unsigned long npages,
|
|
unsigned long *handle);
|
|
extern void iommu_range_free(struct iommu *iommu,
|
|
dma_addr_t dma_addr,
|
|
unsigned long npages);
|
|
|
|
#endif /* _IOMMU_COMMON_H */
|