The swiotlb is required when programming a DMA address on ARM if the device is not protected by an IOMMU. In this case, the DMA address should always be equal to the machine address. For DOM0 memory, Xen ensures this by having an identity mapping between the guest address and the host address. However, when mapping a foreign grant reference, the 1:1 model doesn't work.

For ARM guests, most callers of pfn_to_mfn expect to get a GFN (Guest Frame Number), i.e. a PFN (Page Frame Number) from the Linux point of view, given that all ARM guests are auto-translated. Even though the name pfn_to_mfn is misleading, we need to ensure that those callers get a GFN and not, by mistake, an MFN. In practice I haven't seen an error related to this, but we should fix it for the sake of correctness.

In order to fix the implementation of pfn_to_mfn on ARM in a follow-up patch, we have to introduce new helpers that return the DMA address for a given PFN and the inverse. On x86, the new helpers will be aliases of pfn_to_mfn and mfn_to_pfn.

The helpers will be used in swiotlb and xen_biovec_phys_mergeable. This is necessary in the latter because we have to ensure that the biovec code will not try to merge a biovec using a foreign page with one using Linux memory.

Lastly, the helper mfn_to_local_pfn has been renamed to bfn_to_local_pfn, given that its only usage was in swiotlb.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
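As a rough sketch of what the xen_biovec_phys_mergeable() change could look like (the actual change lives in a separate patch of this series; the body below only illustrates the intent), comparing bus frame numbers rather than raw PFNs keeps a biovec backed by a foreign grant mapping from being merged with one backed by local Linux memory, even if their PFNs happen to be contiguous:

/*
 * Illustrative only: merge two biovecs only if their bus frame numbers
 * are identical or adjacent. pfn_to_bfn() returns the PFN unchanged for
 * local memory and the foreign frame for granted pages, so a foreign
 * page can never appear contiguous with local memory.
 */
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
			       const struct bio_vec *vec2)
{
	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));

	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
		((bfn1 == bfn2) || ((bfn1 + 1) == bfn2));
}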
#ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>

#define phys_to_machine_mapping_valid(pfn) (1)

#define pte_mfn	pte_pfn
#define mfn_pte	pfn_pte

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

#define INVALID_P2M_ENTRY	(~0UL)

unsigned long __pfn_to_mfn(unsigned long pfn);
extern struct rb_root phys_to_mach;

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (phys_to_mach.rb_node != NULL) {
		mfn = __pfn_to_mfn(pfn);
		if (mfn != INVALID_P2M_ENTRY)
			return mfn;
	}

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	return mfn;
}

/* Pseudo-physical <-> BUS conversion */
static inline unsigned long pfn_to_bfn(unsigned long pfn)
{
	unsigned long mfn;

	if (phys_to_mach.rb_node != NULL) {
		mfn = __pfn_to_mfn(pfn);
		if (mfn != INVALID_P2M_ENTRY)
			return mfn;
	}

	return pfn;
}

static inline unsigned long bfn_to_pfn(unsigned long bfn)
{
	return bfn;
}

#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)

/* VIRT <-> MACHINE conversion */
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* Only used in PV code. But ARM guests are always HVM. */
static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	BUG();
}

/* TODO: this shouldn't be here but it is because the frontend drivers
 * are using it (its rolled in headers) even though we won't hit the code path.
 * So for right now just punt with this.
 */
static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	BUG();
	return NULL;
}

extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);

extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
				 unsigned long nr_pages);

static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine(pfn, mfn);
}

#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long bfn);
unsigned long xen_get_swiotlb_free_pages(unsigned int order);

#endif /* _ASM_ARM_XEN_PAGE_H */
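For context, a minimal sketch of how the swiotlb side can consume the new helper to build a bus (DMA) address from a guest physical address. The helper name xen_phys_to_bus and its exact shape are assumptions here (the real change is part of a separate patch in this series); the point is only that the conversion goes through pfn_to_bfn() rather than pfn_to_mfn():

/*
 * Illustrative sketch: convert a guest physical address to a bus/DMA
 * address. PFN_DOWN() extracts the frame number, pfn_to_bfn() translates
 * it to a bus frame (identity for local memory, foreign frame for granted
 * pages), and the sub-page offset is re-applied at the end.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;

	return dma | (paddr & ~PAGE_MASK);
}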