
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-xen-next

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-xen-next: (52 commits)
  xen: add balloon driver
  xen: allow compilation with non-flat memory
  xen: fold xen_sysexit into xen_iret
  xen: allow set_pte_at on init_mm to be lockless
  xen: disable preemption during tlb flush
  xen pvfb: Para-virtual framebuffer, keyboard and pointer driver
  xen: Add compatibility aliases for frontend drivers
  xen: Module autoprobing support for frontend drivers
  xen blkfront: Delay wait for block devices until after the disk is added
  xen/blkfront: use bdget_disk
  xen: Make xen-blkfront write its protocol ABI to xenstore
  xen: import arch generic part of xencomm
  xen: make grant table arch portable
  xen: replace callers of alloc_vm_area()/free_vm_area() with xen_ prefixed one
  xen: make include/xen/page.h portable moving those definitions under asm dir
  xen: add resend_irq_on_evtchn() definition into events.c
  Xen: make events.c portable for ia64/xen support
  xen: move events.c to drivers/xen for IA64/Xen support
  xen: move features.c from arch/x86/xen/features.c to drivers/xen
  xen: add missing definitions in include/xen/interface/vcpu.h which ia64/xen needs
  ...
Linus Torvalds committed on 2008-04-25 12:32:10 -07:00
67 changed files with 3607 additions and 921 deletions


@@ -220,11 +220,13 @@ struct pv_mmu_ops {
unsigned long va);
/* Hooks for allocating/releasing pagetable pages */
void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
void (*alloc_pd)(struct mm_struct *mm, u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
void (*release_pt)(u32 pfn);
void (*release_pd)(u32 pfn);
void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
void (*release_pte)(u32 pfn);
void (*release_pmd)(u32 pfn);
void (*release_pud)(u32 pfn);
/* Pagetable manipulation functions */
void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -910,28 +912,37 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}
static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
static inline void paravirt_release_pte(unsigned pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}
static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn)
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn);
PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
unsigned start, unsigned count)
static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
unsigned start, unsigned count)
{
PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
static inline void paravirt_release_pmd(unsigned pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
#ifdef CONFIG_HIGHPTE

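With these hunks the hooks are named for the pagetable level they cover (pte/pmd/pud) instead of the old pt/pd naming, and the higher levels gain their own pud hooks. A minimal sketch of how a hypothetical backend might wire them up after the rename; all demo_* names are invented for illustration:

static void demo_alloc_pte(struct mm_struct *mm, u32 pfn)
{
	/* a new pagetable page at 'pfn' is entering service; a real
	 * backend (e.g. Xen) would pin it or make it read-only here */
}

static void demo_release_pte(u32 pfn)
{
	/* the pagetable page at 'pfn' is being freed; unpin it here */
}

static void __init demo_init_mmu_ops(void)
{
	pv_mmu_ops.alloc_pte   = demo_alloc_pte;
	pv_mmu_ops.release_pte = demo_release_pte;
	/* each higher level now has its own hook: */
	pv_mmu_ops.alloc_pmd   = demo_alloc_pte;
	pv_mmu_ops.release_pmd = demo_release_pte;
	pv_mmu_ops.alloc_pud   = demo_alloc_pte;
	pv_mmu_ops.release_pud = demo_release_pte;
}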

@@ -1,5 +1,110 @@
#ifdef CONFIG_X86_32
# include "pgalloc_32.h"
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
#include <linux/pagemap.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
# include "pgalloc_64.h"
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
#endif
/*
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
struct page *pte)
{
unsigned long pfn = page_to_pfn(pte);
paravirt_alloc_pte(mm, pfn);
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#if PAGETABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
}
extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else /* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif /* CONFIG_X86_PAE */
#if PAGETABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
free_page((unsigned long)pud);
}
extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* PAGETABLE_LEVELS > 2 */
#endif /* _ASM_X86_PGALLOC_H */

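For context, a sketch of the allocation path these helpers serve: allocate a pte page, then populate the pmd, which notifies the paravirt layer before the hardware entry is written. Illustrative only (locking and error paths elided), not code from the patch:

static int demo_map_pte_page(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr)
{
	pgtable_t new = pte_alloc_one(mm, addr);	/* zeroed pte page */

	if (!new)
		return -ENOMEM;
	pmd_populate(mm, pmd, new);	/* calls paravirt_alloc_pte(), then set_pmd() */
	return 0;
}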

@@ -1,95 +0,0 @@
#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/tlb.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(mm, pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pt(pfn) do { } while (0)
#define paravirt_release_pd(pfn) do { } while (0)
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
unsigned long pfn = page_to_pfn(pte);
paravirt_alloc_pt(mm, pfn);
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
#ifdef CONFIG_X86_PAE
/*
* In the PAE case we free the pmds as part of the pgd.
*/
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
}
extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
/* Note: almost everything apart from _PAGE_PRESENT is
reserved at the pmd (PDPT) level. */
set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
/*
* According to Intel App note "TLBs, Paging-Structure Caches,
* and Their Invalidation", April 2007, document 317080-001,
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*/
if (mm == current->active_mm)
write_cr3(read_cr3());
}
#endif /* CONFIG_X86_PAE */
#endif /* _I386_PGALLOC_H */


@@ -1,133 +0,0 @@
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
}
static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
free_page((unsigned long)pud);
}
static inline void pgd_list_add(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
unsigned long flags;
spin_lock_irqsave(&pgd_lock, flags);
list_add(&page->lru, &pgd_list);
spin_unlock_irqrestore(&pgd_lock, flags);
}
static inline void pgd_list_del(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
unsigned long flags;
spin_lock_irqsave(&pgd_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&pgd_lock, flags);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
unsigned boundary;
pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (!pgd)
return NULL;
pgd_list_add(pgd);
/*
* Copy kernel pointers in from init.
* Could keep a freelist or slab cache of those because the kernel
* part never changes.
*/
boundary = pgd_index(__PAGE_OFFSET);
memset(pgd, 0, boundary * sizeof(pgd_t));
memcpy(pgd + boundary,
init_level4_pgt + boundary,
(PTRS_PER_PGD - boundary) * sizeof(pgd_t));
return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
pgd_list_del(pgd);
free_page((unsigned long)pgd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page;
void *p;
p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
if (!p)
return NULL;
page = virt_to_page(p);
pgtable_page_ctor(page);
return page;
}
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_page(pte);
}
#define __pte_free_tlb(tlb,pte) \
do { \
pgtable_page_dtor((pte)); \
tlb_remove_page((tlb), (pte)); \
} while (0)
#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#endif /* _X86_64_PGALLOC_H */


@@ -1,7 +1,6 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS 0
#define _PAGE_BIT_PRESENT 0 /* is present */
@@ -330,6 +329,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
# include "pgtable_64.h"
#endif
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
#ifndef __ASSEMBLY__
enum {
@@ -389,37 +391,17 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
* bit at the same time.
*/
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
({ \
int __changed = !pte_same(*(ptep), entry); \
if (__changed && dirty) { \
*ptep = entry; \
pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
__changed; \
})
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
int __ret = 0; \
if (pte_young(*(ptep))) \
__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
&(ptep)->pte); \
if (__ret) \
pte_update((vma)->vm_mm, addr, ptep); \
__ret; \
})
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep) \
({ \
int __young; \
__young = ptep_test_and_clear_young((vma), (address), (ptep)); \
if (__young) \
flush_tlb_page(vma, address); \
__young; \
})
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -456,6 +438,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
pte_update(mm, addr, ptep);
}
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
* dst - pointer to pgd range anywhere on a pgd page
* src - ""
* count - the number of pgds to copy.
*
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
memcpy(dst, src, count * sizeof(pgd_t));
}
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

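clone_pgd_range() plus the new KERNEL_PGD_BOUNDARY/KERNEL_PGD_PTRS constants express the common "share the kernel half of the pgd" idiom; a hedged sketch of that use:

static void demo_copy_kernel_pgds(pgd_t *new_pgd)
{
	/* a fresh pgd shares its kernel half with swapper_pg_dir */
	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
}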

@@ -48,9 +48,6 @@ void paging_init(void);
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
@@ -108,21 +105,6 @@ extern int pmd_bad(pmd_t pmd);
# include <asm/pgtable-2level.h>
#endif
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
* dst - pointer to pgd range anywhere on a pgd page
* src - ""
* count - the number of pgds to copy.
*
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
memcpy(dst, src, count * sizeof(pgd_t));
}
/*
* Macro to mark a page protection value as "uncacheable".
* On processors which do not support it, this is a no-op.


@@ -24,7 +24,7 @@ extern void paging_init(void);
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 1
#define SHARED_KERNEL_PMD 0
/*
* PGDIR_SHIFT determines what a top-level page table entry can map


@@ -0,0 +1,22 @@
#ifndef __XEN_EVENTS_H
#define __XEN_EVENTS_H
enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_CALL_FUNCTION_VECTOR,
XEN_NR_IPIS,
};
static inline int xen_irqs_disabled(struct pt_regs *regs)
{
return raw_irqs_disabled_flags(regs->flags);
}
static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
{
regs->orig_ax = ~irq;
do_IRQ(regs);
}
#endif /* __XEN_EVENTS_H */


@@ -0,0 +1,7 @@
#ifndef __XEN_GRANT_TABLE_H
#define __XEN_GRANT_TABLE_H
#define xen_alloc_vm_area(size) alloc_vm_area(size)
#define xen_free_vm_area(area) free_vm_area(area)
#endif /* __XEN_GRANT_TABLE_H */


@@ -163,6 +163,12 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
failsafe_selector, failsafe_address);
}
static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
return _hypercall2(int, callback_op, cmd, arg);
}
static inline int
HYPERVISOR_fpu_taskswitch(int set)
{


@@ -22,6 +22,30 @@
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
#define GUEST_HANDLE(name) __guest_handle_ ## name
#ifdef __XEN__
#if defined(__i386__)
#define set_xen_guest_handle(hnd, val) \
do { \
if (sizeof(hnd) == 8) \
*(uint64_t *)&(hnd) = 0; \
(hnd).p = val; \
} while (0)
#elif defined(__x86_64__)
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
#endif
#else
#if defined(__i386__)
#define set_xen_guest_handle(hnd, val) \
do { \
if (sizeof(hnd) == 8) \
*(uint64_t *)&(hnd) = 0; \
(hnd) = val; \
} while (0)
#elif defined(__x86_64__)
#define set_xen_guest_handle(hnd, val) do { (hnd) = val; } while (0)
#endif
#endif
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
@@ -171,6 +195,10 @@ struct arch_vcpu_info {
unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
};
struct xen_callback {
unsigned long cs;
unsigned long eip;
};
#endif /* !__ASSEMBLY__ */
/*

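To illustrate the macro added above: a hedged sketch of filling a guest handle before a hypercall, using the grant-table setup struct converted later in this merge (error handling elided):

static int demo_setup_grant_frames(void)
{
	static unsigned long frames[NR_GRANT_FRAMES];
	struct gnttab_setup_table setup = {
		.dom       = DOMID_SELF,
		.nr_frames = NR_GRANT_FRAMES,
	};

	/* where the handle is 64 bits wide, the macro zeroes it first
	 * so the hypervisor never sees stale upper bits */
	set_xen_guest_handle(setup.frame_list, frames);
	return HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
}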
include/asm-x86/xen/page.h (new file)

@@ -0,0 +1,168 @@
#ifndef __XEN_PAGE_H
#define __XEN_PAGE_H
#include <linux/pfn.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <xen/features.h>
/* Xen machine address */
typedef struct xmaddr {
phys_addr_t maddr;
} xmaddr_t;
/* Xen pseudo-physical address */
typedef struct xpaddr {
phys_addr_t paddr;
} xpaddr_t;
#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
#define FOREIGN_FRAME_BIT (1UL<<31)
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
extern unsigned long *phys_to_machine_mapping;
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
return phys_to_machine_mapping[(unsigned int)(pfn)] &
~FOREIGN_FRAME_BIT;
}
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return 1;
return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
}
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
unsigned long pfn;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
#if 0
if (unlikely((mfn >> machine_to_phys_order) != 0))
return max_mapnr;
#endif
pfn = 0;
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
__get_user(pfn, &machine_to_phys_mapping[mfn]);
return pfn;
}
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}
static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
unsigned offset = machine.maddr & ~PAGE_MASK;
return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
/*
* We detect special mappings in one of two ways:
* 1. If the MFN is an I/O page then Xen will set the m2p entry
* to be outside our maximum possible pseudophys range.
* 2. If the MFN belongs to a different domain then we will certainly
* not have MFN in our p2m table. Conversely, if the page is ours,
* then we'll have p2m(m2p(MFN))==MFN.
* If we detect a special mapping then it doesn't have a 'struct page'.
* We force !pfn_valid() by returning an out-of-range pointer.
*
* NB. These checks require that, for any MFN that is not in our reservation,
* there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
* we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
* Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
*
* NB2. When deliberately mapping foreign pages into the p2m table, you *must*
* use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
* require. In all the cases we care about, the FOREIGN_FRAME bit is
* masked (e.g., pfn_to_mfn()) so behaviour there is correct.
*/
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
extern unsigned long max_mapnr;
unsigned long pfn = mfn_to_pfn(mfn);
if ((pfn < max_mapnr)
&& !xen_feature(XENFEAT_auto_translated_physmap)
&& (phys_to_machine_mapping[pfn] != mfn))
return max_mapnr; /* force !pfn_valid() */
return pfn;
}
static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap)) {
BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
return;
}
phys_to_machine_mapping[pfn] = mfn;
}
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
static inline unsigned long pte_mfn(pte_t pte)
{
return (pte.pte & ~_PAGE_NX) >> PAGE_SHIFT;
}
static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
pte_t pte;
pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
(pgprot_val(pgprot) & __supported_pte_mask);
return pte;
}
static inline pteval_t pte_val_ma(pte_t pte)
{
return pte.pte;
}
static inline pte_t __pte_ma(pteval_t x)
{
return (pte_t) { .pte = x };
}
#ifdef CONFIG_X86_PAE
#define pmd_val_ma(v) ((v).pmd)
#define pud_val_ma(v) ((v).pgd.pgd)
#define __pmd_ma(x) ((pmd_t) { (x) } )
#else /* !X86_PAE */
#define pmd_val_ma(v) ((v).pud.pgd.pgd)
#endif /* CONFIG_X86_PAE */
#define pgd_val_ma(x) ((x).pgd)
xmaddr_t arbitrary_virt_to_machine(unsigned long address);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
#endif /* __XEN_PAGE_H */

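A short usage sketch of the conversion helpers above, assuming a lowmem kernel page inside the guest's own reservation (illustrative only):

static void demo_show_translation(void)
{
	void *va = (void *)get_zeroed_page(GFP_KERNEL);
	unsigned long pfn, mfn;

	if (!va)
		return;
	pfn = PFN_DOWN(__pa(va));	/* pseudo-physical frame number */
	mfn = pfn_to_mfn(pfn);		/* machine frame via the p2m array */
	BUG_ON(mfn_to_pfn(mfn) != pfn);	/* our own pages round-trip */
	free_page((unsigned long)va);
}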
include/xen/balloon.h (new file)

@@ -0,0 +1,61 @@
/******************************************************************************
* balloon.h
*
* Xen balloon driver - enables returning/claiming memory to/from Xen.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __XEN_BALLOON_H__
#define __XEN_BALLOON_H__
#include <linux/spinlock.h>
#if 0
/*
* Inform the balloon driver that it should allow some slop for device-driver
* memory activities.
*/
void balloon_update_driver_allowance(long delta);
/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
struct page **alloc_empty_pages_and_pagevec(int nr_pages);
void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
void balloon_release_driver_page(struct page *page);
/*
* Prevent the balloon driver from changing the memory reservation during
* a driver critical region.
*/
extern spinlock_t balloon_lock;
#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
#endif
#endif /* __XEN_BALLOON_H__ */


@@ -5,13 +5,7 @@
#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>
enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_CALL_FUNCTION_VECTOR,
XEN_NR_IPIS,
};
#include <asm/xen/events.h>
int bind_evtchn_to_irq(unsigned int evtchn);
int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -37,6 +31,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
int resend_irq_on_evtchn(unsigned int irq);
static inline void notify_remote_via_evtchn(int port)
{


@@ -39,6 +39,7 @@
#include <asm/xen/hypervisor.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/grant_table.h>
/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4
@@ -102,6 +103,12 @@ void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
struct grant_entry **__shared);
void arch_gnttab_unmap_shared(struct grant_entry *shared,
unsigned long nr_gframes);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
#endif /* __ASM_GNTTAB_H__ */


@@ -0,0 +1,102 @@
/******************************************************************************
* callback.h
*
* Register guest OS callbacks with Xen.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2006, Ian Campbell
*/
#ifndef __XEN_PUBLIC_CALLBACK_H__
#define __XEN_PUBLIC_CALLBACK_H__
#include "xen.h"
/*
* Prototype for this hypercall is:
* long callback_op(int cmd, void *extra_args)
* @cmd == CALLBACKOP_??? (callback operation).
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
/* ia64, x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
#define CALLBACKTYPE_failsafe 1
/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
#define CALLBACKTYPE_syscall 2
/*
* x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
* feature is enabled. Do not use this callback type in new code.
*/
#define CALLBACKTYPE_sysenter_deprecated 3
/* x86: Callback for NMI delivery. */
#define CALLBACKTYPE_nmi 4
/*
* x86: sysenter is only available as follows:
* - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
* - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
* ('32-on-32-on-64', '32-on-64-on-64')
* [nb. also 64-bit guest applications on Intel CPUs
* ('64-on-64-on-64'), but syscall is preferred]
*/
#define CALLBACKTYPE_sysenter 5
/*
* x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
* ('32-on-32-on-64', '32-on-64-on-64')
*/
#define CALLBACKTYPE_syscall32 7
/*
* Disable event delivery during callback? This flag is ignored for event and
* NMI callbacks: event delivery is unconditionally disabled.
*/
#define _CALLBACKF_mask_events 0
#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
/*
* Register a callback.
*/
#define CALLBACKOP_register 0
struct callback_register {
uint16_t type;
uint16_t flags;
struct xen_callback address;
};
/*
* Unregister a callback.
*
* Not all callbacks can be unregistered. -EINVAL will be returned if
* you attempt to unregister such a callback.
*/
#define CALLBACKOP_unregister 1
struct callback_unregister {
uint16_t type;
uint16_t _unused;
};
#endif /* __XEN_PUBLIC_CALLBACK_H__ */

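Tying this to the HYPERVISOR_callback_op() wrapper added earlier in the merge: a hypothetical 32-bit registration of an event callback (struct xen_callback's cs/eip layout is the x86_32 one shown above; all names are illustrative):

static int demo_register_event_callback(unsigned long handler_address)
{
	struct callback_register event = {
		.type    = CALLBACKTYPE_event,
		.address = { .cs = __KERNEL_CS, .eip = handler_address },
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &event);
}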

@@ -185,6 +185,7 @@ struct gnttab_map_grant_ref {
grant_handle_t handle;
uint64_t dev_bus_addr;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
/*
* GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
@@ -206,6 +207,7 @@ struct gnttab_unmap_grant_ref {
/* OUT parameters. */
int16_t status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
/*
* GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
@@ -223,8 +225,9 @@ struct gnttab_setup_table {
uint32_t nr_frames;
/* OUT parameters. */
int16_t status; /* GNTST_* */
ulong *frame_list;
GUEST_HANDLE(ulong) frame_list;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
/*
* GNTTABOP_dump_table: Dump the contents of the grant table to the
@@ -237,6 +240,7 @@ struct gnttab_dump_table {
/* OUT parameters. */
int16_t status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
/*
* GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
@@ -255,7 +259,7 @@ struct gnttab_transfer {
/* OUT parameters. */
int16_t status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
/*
* GNTTABOP_copy: Hypervisor based copy
@@ -296,6 +300,7 @@ struct gnttab_copy {
/* OUT parameters. */
int16_t status;
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
/*
* GNTTABOP_query_size: Query the current and maximum sizes of the shared
@@ -313,7 +318,7 @@ struct gnttab_query_size {
uint32_t max_nr_frames;
int16_t status; /* GNTST_* */
};
DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
/*
* Bitfield values for update_pin_status.flags.


@@ -0,0 +1,124 @@
/*
* fbif.h -- Xen virtual frame buffer device
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
#ifndef __XEN_PUBLIC_IO_FBIF_H__
#define __XEN_PUBLIC_IO_FBIF_H__
/* Out events (frontend -> backend) */
/*
* Out events may be sent only when requested by backend, and receipt
* of an unknown out event is an error.
*/
/* Event type 1 currently not used */
/*
* Framebuffer update notification event
* Capable frontend sets feature-update in xenstore.
* Backend requests it by setting request-update in xenstore.
*/
#define XENFB_TYPE_UPDATE 2
struct xenfb_update {
uint8_t type; /* XENFB_TYPE_UPDATE */
int32_t x; /* source x */
int32_t y; /* source y */
int32_t width; /* rect width */
int32_t height; /* rect height */
};
#define XENFB_OUT_EVENT_SIZE 40
union xenfb_out_event {
uint8_t type;
struct xenfb_update update;
char pad[XENFB_OUT_EVENT_SIZE];
};
/* In events (backend -> frontend) */
/*
* Frontends should ignore unknown in events.
* No in events currently defined.
*/
#define XENFB_IN_EVENT_SIZE 40
union xenfb_in_event {
uint8_t type;
char pad[XENFB_IN_EVENT_SIZE];
};
/* shared page */
#define XENFB_IN_RING_SIZE 1024
#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
#define XENFB_IN_RING_OFFS 1024
#define XENFB_IN_RING(page) \
((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
#define XENFB_IN_RING_REF(page, idx) \
(XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
#define XENFB_OUT_RING_SIZE 2048
#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
#define XENFB_OUT_RING(page) \
((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
#define XENFB_OUT_RING_REF(page, idx) \
(XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
struct xenfb_page {
uint32_t in_cons, in_prod;
uint32_t out_cons, out_prod;
int32_t width; /* width of the framebuffer (in pixels) */
int32_t height; /* height of the framebuffer (in pixels) */
uint32_t line_length; /* length of a row of pixels (in bytes) */
uint32_t mem_length; /* length of the framebuffer (in bytes) */
uint8_t depth; /* depth of a pixel (in bits) */
/*
* Framebuffer page directory
*
* Each directory page holds PAGE_SIZE / sizeof(*pd)
* framebuffer pages, and can thus map up to PAGE_SIZE *
* PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
* sizeof(unsigned long) == 4, that's 4 Megs. Two directory
* pages should be enough for a while.
*/
unsigned long pd[2];
};
/*
* Wart: xenkbd needs to know resolution. Put it here until a better
* solution is found, but don't leak it to the backend.
*/
#ifdef __KERNEL__
#define XENFB_WIDTH 800
#define XENFB_HEIGHT 600
#define XENFB_DEPTH 32
#endif
#endif

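The page-directory comment above implies the maximum framebuffer size; a worked version of that arithmetic with illustrative defines (not part of the interface):

/* With PAGE_SIZE == 4096 and sizeof(unsigned long) == 4: */
#define DEMO_PAGES_PER_PD	(PAGE_SIZE / sizeof(unsigned long))	/* 1024 pages */
#define DEMO_BYTES_PER_PD	(DEMO_PAGES_PER_PD * PAGE_SIZE)		/* 4 MiB */
/* pd[2] therefore maps up to 8 MiB; 800x600 at 32 bpp needs only
 * 800 * 600 * 4 bytes, roughly 1.9 MiB. */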

@@ -0,0 +1,114 @@
/*
* kbdif.h -- Xen virtual keyboard/mouse
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
*/
#ifndef __XEN_PUBLIC_IO_KBDIF_H__
#define __XEN_PUBLIC_IO_KBDIF_H__
/* In events (backend -> frontend) */
/*
* Frontends should ignore unknown in events.
*/
/* Pointer movement event */
#define XENKBD_TYPE_MOTION 1
/* Event type 2 currently not used */
/* Key event (includes pointer buttons) */
#define XENKBD_TYPE_KEY 3
/*
* Pointer position event
* Capable backend sets feature-abs-pointer in xenstore.
* Frontend requests it instead of XENKBD_TYPE_MOTION by setting
* request-abs-update in xenstore.
*/
#define XENKBD_TYPE_POS 4
struct xenkbd_motion {
uint8_t type; /* XENKBD_TYPE_MOTION */
int32_t rel_x; /* relative X motion */
int32_t rel_y; /* relative Y motion */
};
struct xenkbd_key {
uint8_t type; /* XENKBD_TYPE_KEY */
uint8_t pressed; /* 1 if pressed; 0 otherwise */
uint32_t keycode; /* KEY_* from linux/input.h */
};
struct xenkbd_position {
uint8_t type; /* XENKBD_TYPE_POS */
int32_t abs_x; /* absolute X position (in FB pixels) */
int32_t abs_y; /* absolute Y position (in FB pixels) */
};
#define XENKBD_IN_EVENT_SIZE 40
union xenkbd_in_event {
uint8_t type;
struct xenkbd_motion motion;
struct xenkbd_key key;
struct xenkbd_position pos;
char pad[XENKBD_IN_EVENT_SIZE];
};
/* Out events (frontend -> backend) */
/*
* Out events may be sent only when requested by backend, and receipt
* of an unknown out event is an error.
* No out events currently defined.
*/
#define XENKBD_OUT_EVENT_SIZE 40
union xenkbd_out_event {
uint8_t type;
char pad[XENKBD_OUT_EVENT_SIZE];
};
/* shared page */
#define XENKBD_IN_RING_SIZE 2048
#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
#define XENKBD_IN_RING_OFFS 1024
#define XENKBD_IN_RING(page) \
((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
#define XENKBD_IN_RING_REF(page, idx) \
(XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
#define XENKBD_OUT_RING_SIZE 1024
#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
#define XENKBD_OUT_RING(page) \
((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
#define XENKBD_OUT_RING_REF(page, idx) \
(XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
struct xenkbd_page {
uint32_t in_cons, in_prod;
uint32_t out_cons, out_prod;
};
#endif

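A hedged sketch of how a frontend might drain the in ring defined above (memory barriers and locking elided for brevity; demo_next_event is an invented name):

static int demo_next_event(struct xenkbd_page *page, union xenkbd_in_event *ev)
{
	if (page->in_cons == page->in_prod)
		return 0;				/* ring is empty */
	*ev = XENKBD_IN_RING_REF(page, page->in_cons);
	page->in_cons++;	/* REF wraps the index via % XENKBD_IN_RING_LEN */
	return 1;
}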

@@ -0,0 +1,21 @@
#ifndef __XEN_PROTOCOLS_H__
#define __XEN_PROTOCOLS_H__
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
#if defined(__i386__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined(__ia64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
#elif defined(__powerpc64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
#else
# error arch fixup needed here
#endif
#endif

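These strings let a frontend advertise its native ABI in xenstore (cf. "xen: Make xen-blkfront write its protocol ABI to xenstore" in the shortlog). A hypothetical sketch of that write:

static int demo_advertise_protocol(struct xenbus_device *dev)
{
	return xenbus_printf(XBT_NIL, dev->nodename, "protocol", "%s",
			     XEN_IO_PROTO_ABI_NATIVE);
}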

@@ -29,7 +29,7 @@ struct xen_memory_reservation {
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
*/
GUEST_HANDLE(ulong) extent_start;
ulong extent_start;
/* Number of extents, and size/alignment of each (2^extent_order pages). */
unsigned long nr_extents;
@@ -50,7 +50,6 @@ struct xen_memory_reservation {
domid_t domid;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
/*
* Returns the maximum machine frame number of mapped RAM in this system.
@@ -86,7 +85,7 @@ struct xen_machphys_mfn_list {
* any large discontiguities in the machine address space, 2MB gaps in
* the machphys table will be represented by an MFN base of zero.
*/
GUEST_HANDLE(ulong) extent_start;
ulong extent_start;
/*
* Number of extents written to the above array. This will be smaller
@@ -94,7 +93,6 @@ struct xen_machphys_mfn_list {
*/
unsigned int nr_extents;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
/*
* Sets the GPFN at which a particular page appears in the specified guest's
@@ -117,7 +115,6 @@ struct xen_add_to_physmap {
/* GPFN where the source mapping page should appear. */
unsigned long gpfn;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
/*
* Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
@@ -132,14 +129,13 @@ struct xen_translate_gpfn_list {
unsigned long nr_gpfns;
/* List of GPFNs to translate. */
GUEST_HANDLE(ulong) gpfn_list;
ulong gpfn_list;
/*
* Output list to contain MFN translations. May be the same as the input
* list (in which case each input GPFN is overwritten with the output MFN).
*/
GUEST_HANDLE(ulong) mfn_list;
ulong mfn_list;
};
DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
#endif /* __XEN_PUBLIC_MEMORY_H__ */


@@ -85,6 +85,7 @@ struct vcpu_runstate_info {
*/
uint64_t time[4];
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
/* VCPU is currently running on a physical CPU. */
#define RUNSTATE_running 0
@@ -119,6 +120,7 @@ struct vcpu_runstate_info {
#define VCPUOP_register_runstate_memory_area 5
struct vcpu_register_runstate_memory_area {
union {
GUEST_HANDLE(vcpu_runstate_info) h;
struct vcpu_runstate_info *v;
uint64_t p;
} addr;
@@ -134,6 +136,7 @@ struct vcpu_register_runstate_memory_area {
struct vcpu_set_periodic_timer {
uint64_t period_ns;
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
/*
* Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
@@ -145,6 +148,7 @@ struct vcpu_set_singleshot_timer {
uint64_t timeout_abs_ns;
uint32_t flags; /* VCPU_SSHOTTMR_??? */
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);
/* Flags to VCPUOP_set_singleshot_timer. */
/* Require the timeout to be in the future (return -ETIME if it's passed). */
@@ -164,5 +168,6 @@ struct vcpu_register_vcpu_info {
uint32_t offset; /* offset within page */
uint32_t rsvd; /* unused */
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
#endif /* __XEN_PUBLIC_VCPU_H__ */

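The new GUEST_HANDLE member of the address union pairs with set_xen_guest_handle(); a sketch of registering a per-vcpu runstate area, with invented demo_* names (mirrors what a guest port might do):

static DEFINE_PER_CPU(struct vcpu_runstate_info, demo_runstate);

static void demo_register_runstate(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	set_xen_guest_handle(area.addr.h, &per_cpu(demo_runstate, cpu));
	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}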

@@ -58,6 +58,16 @@
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55
/*
* VIRTUAL INTERRUPTS
*
@@ -68,8 +78,18 @@
#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
#define NR_VIRQS 8
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
#define VIRQ_ARCH_1 17
#define VIRQ_ARCH_2 18
#define VIRQ_ARCH_3 19
#define VIRQ_ARCH_4 20
#define VIRQ_ARCH_5 21
#define VIRQ_ARCH_6 22
#define VIRQ_ARCH_7 23
#define NR_VIRQS 24
/*
* MMU-UPDATE REQUESTS
*

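An arch-specific VIRQ from the new range is bound like any other VIRQ; a hypothetical driver-side sketch (handler, flags, and names are all illustrative):

static irqreturn_t demo_virq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init demo_bind_arch_virq(void)
{
	return bind_virq_to_irqhandler(VIRQ_ARCH_0, 0 /* cpu */,
				       demo_virq_handler, 0,
				       "demo-virq", NULL);
}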

@@ -0,0 +1,41 @@
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) IBM Corp. 2006
*/
#ifndef _XEN_XENCOMM_H_
#define _XEN_XENCOMM_H_
/* A xencomm descriptor is a scatter/gather list containing physical
* addresses corresponding to a virtually contiguous memory area. The
* hypervisor translates these physical addresses to machine addresses to copy
* to and from the virtually contiguous area.
*/
#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
#define XENCOMM_INVALID (~0UL)
struct xencomm_desc {
uint32_t magic;
uint32_t nr_addrs; /* the number of entries in address[] */
uint64_t address[0];
};
#endif /* _XEN_XENCOMM_H_ */


@@ -1,180 +1 @@
#ifndef __XEN_PAGE_H
#define __XEN_PAGE_H
#include <linux/pfn.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <xen/features.h>
#ifdef CONFIG_X86_PAE
/* Xen machine address */
typedef struct xmaddr {
unsigned long long maddr;
} xmaddr_t;
/* Xen pseudo-physical address */
typedef struct xpaddr {
unsigned long long paddr;
} xpaddr_t;
#else
/* Xen machine address */
typedef struct xmaddr {
unsigned long maddr;
} xmaddr_t;
/* Xen pseudo-physical address */
typedef struct xpaddr {
unsigned long paddr;
} xpaddr_t;
#endif
#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
#define FOREIGN_FRAME_BIT (1UL<<31)
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
extern unsigned long *phys_to_machine_mapping;
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
return phys_to_machine_mapping[(unsigned int)(pfn)] &
~FOREIGN_FRAME_BIT;
}
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return 1;
return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
}
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
unsigned long pfn;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
#if 0
if (unlikely((mfn >> machine_to_phys_order) != 0))
return max_mapnr;
#endif
pfn = 0;
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
__get_user(pfn, &machine_to_phys_mapping[mfn]);
return pfn;
}
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}
static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
unsigned offset = machine.maddr & ~PAGE_MASK;
return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
/*
* We detect special mappings in one of two ways:
* 1. If the MFN is an I/O page then Xen will set the m2p entry
* to be outside our maximum possible pseudophys range.
* 2. If the MFN belongs to a different domain then we will certainly
* not have MFN in our p2m table. Conversely, if the page is ours,
* then we'll have p2m(m2p(MFN))==MFN.
* If we detect a special mapping then it doesn't have a 'struct page'.
* We force !pfn_valid() by returning an out-of-range pointer.
*
* NB. These checks require that, for any MFN that is not in our reservation,
* there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
* we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
* Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
*
* NB2. When deliberately mapping foreign pages into the p2m table, you *must*
* use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
* require. In all the cases we care about, the FOREIGN_FRAME bit is
* masked (e.g., pfn_to_mfn()) so behaviour there is correct.
*/
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
extern unsigned long max_mapnr;
unsigned long pfn = mfn_to_pfn(mfn);
if ((pfn < max_mapnr)
&& !xen_feature(XENFEAT_auto_translated_physmap)
&& (phys_to_machine_mapping[pfn] != mfn))
return max_mapnr; /* force !pfn_valid() */
return pfn;
}
static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap)) {
BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
return;
}
phys_to_machine_mapping[pfn] = mfn;
}
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#ifdef CONFIG_X86_PAE
#define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
(((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)))
static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
pte_t pte;
pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) |
(pgprot_val(pgprot) >> 32);
pte.pte_high &= (__supported_pte_mask >> 32);
pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
pte.pte_low &= __supported_pte_mask;
return pte;
}
static inline unsigned long long pte_val_ma(pte_t x)
{
return x.pte;
}
#define pmd_val_ma(v) ((v).pmd)
#define pud_val_ma(v) ((v).pgd.pgd)
#define __pte_ma(x) ((pte_t) { .pte = (x) })
#define __pmd_ma(x) ((pmd_t) { (x) } )
#else /* !X86_PAE */
#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
#define mfn_pte(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_val_ma(x) ((x).pte)
#define pmd_val_ma(v) ((v).pud.pgd.pgd)
#define __pte_ma(x) ((pte_t) { (x) } )
#endif /* CONFIG_X86_PAE */
#define pgd_val_ma(x) ((x).pgd)
xmaddr_t arbitrary_virt_to_machine(unsigned long address);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
#endif /* __XEN_PAGE_H */
#include <asm/xen/page.h>

include/xen/xen-ops.h (new file)

@@ -0,0 +1,8 @@
#ifndef INCLUDE_XEN_OPS_H
#define INCLUDE_XEN_OPS_H
#include <linux/percpu.h>
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
#endif /* INCLUDE_XEN_OPS_H */


@@ -97,6 +97,7 @@ struct xenbus_driver {
int (*uevent)(struct xenbus_device *, char **, int, char *, int);
struct device_driver driver;
int (*read_otherend_details)(struct xenbus_device *dev);
int (*is_ready)(struct xenbus_device *dev);
};
static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)

include/xen/xencomm.h (new file)

@@ -0,0 +1,77 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (C) IBM Corp. 2006
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Jerone Young <jyoung5@us.ibm.com>
*/
#ifndef _LINUX_XENCOMM_H_
#define _LINUX_XENCOMM_H_
#include <xen/interface/xencomm.h>
#define XENCOMM_MINI_ADDRS 3
struct xencomm_mini {
struct xencomm_desc _desc;
uint64_t address[XENCOMM_MINI_ADDRS];
};
/* To avoid additional virt to phys conversion, an opaque structure is
presented. */
struct xencomm_handle;
extern void xencomm_free(struct xencomm_handle *desc);
extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
unsigned long bytes, struct xencomm_mini *xc_area);
#if 0
#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
struct xencomm_mini xc_desc ## _base[(n)] \
__attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
#else
/*
* gcc bug workaround:
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
* gcc doesn't properly handle stack variables with
* __attribute__((__align__(sizeof(struct xencomm_mini))))
*/
#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
unsigned char xc_desc ## _base[((n) + 1 ) * \
sizeof(struct xencomm_mini)]; \
struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
((unsigned long)xc_desc ## _base + \
(sizeof(struct xencomm_mini) - \
((unsigned long)xc_desc ## _base) % \
sizeof(struct xencomm_mini)));
#endif
#define xencomm_map_no_alloc(ptr, bytes) \
({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
__xencomm_map_no_alloc(ptr, bytes, xc_desc); })
/* provided by architecture code: */
extern unsigned long xencomm_vtop(unsigned long vaddr);
static inline void *xencomm_pa(void *ptr)
{
return (void *)xencomm_vtop((unsigned long)ptr);
}
#define xen_guest_handle(hnd) ((hnd).p)
#endif /* _LINUX_XENCOMM_H_ */
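A hedged sketch of the intended usage: wrap a guest buffer in an on-stack mini descriptor and hand the opaque handle to an arch hypercall wrapper. demo_arch_hypercall() is hypothetical; real wrappers live in the per-arch code:

extern long demo_arch_hypercall(struct xencomm_handle *desc);	/* hypothetical */

static long demo_hypercall_with_buffer(void *buf, unsigned long len)
{
	struct xencomm_handle *desc = xencomm_map_no_alloc(buf, len);

	if (!desc)
		return -EINVAL;
	return demo_arch_hypercall(desc);
}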