• [PATCH v2 2/9] xen/x86: Remove PVH support

    From Boris Ostrovsky@110:300/11 to All on Thu Jan 26 21:40:01 2017
    We are replacing the existing PVH guest support with a new implementation.

    We are keeping the xen_pvh_domain() macro (for now set to zero) because
    when we introduce the new PVH implementation later in this series we will
    reuse the current PVH-specific code (xen_pvh_gnttab_setup()), and that
    code is guarded by 'if (xen_pvh_domain())'. (We will also need a no-op
    xen_pvh_domain() for !CONFIG_XEN_PVH.)
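
    The pattern being kept is roughly the sketch below (a simplified
    illustration, not the kernel code itself: the real xen_pvh_gnttab_setup()
    lives in drivers/xen/grant-table.c, and pvh_aware_init() is a made-up
    caller for this example):

        /* After this patch the predicate is a compile-time constant zero,
         * so PVH-only paths compile but are dead code until the new
         * implementation flips it back on. */
        #define xen_pvh_domain() (0)

        /* Illustrative stand-in for the real PVH-only helper. */
        static void xen_pvh_gnttab_setup(void) { }

        static void pvh_aware_init(void)
        {
                if (xen_pvh_domain())   /* always false until PVH returns */
                        xen_pvh_gnttab_setup();
                /* ...common PV setup continues here... */
        }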

    Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
    Reviewed-by: Juergen Gross <jgross@suse.com>
    Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
    ---
    Changes in v2:
    * Added comment to commit message clarifying why xen_pvh_domain()
    is kept.


    arch/x86/xen/enlighten.c | 140 ++++++---------------------------------
    arch/x86/xen/mmu.c | 21 +-----
    arch/x86/xen/setup.c | 37 +----------
    arch/x86/xen/smp.c | 78 ++++++++--------------
    arch/x86/xen/smp.h | 8 ---
    arch/x86/xen/xen-head.S | 62 ++---------------
    arch/x86/xen/xen-ops.h | 1 -
    drivers/xen/events/events_base.c | 1 -
    include/xen/xen.h | 13 +---
    9 files changed, 54 insertions(+), 307 deletions(-)

    diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
    index 51ef952..828f1b2 100644
    --- a/arch/x86/xen/enlighten.c
    +++ b/arch/x86/xen/enlighten.c
    @@ -1138,10 +1138,11 @@ void xen_setup_vcpu_info_placement(void)
    xen_vcpu_setup(cpu);
    }

    - /* xen_vcpu_setup managed to place the vcpu_info within the
    - * percpu area for all cpus, so make use of it. Note that for
    - * PVH we want to use native IRQ mechanism. */
    - if (have_vcpu_info_placement && !xen_pvh_domain()) {
    + /*
    + * xen_vcpu_setup managed to place the vcpu_info within the
    + * percpu area for all cpus, so make use of it.
    + */
    + if (have_vcpu_info_placement) {
    pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
    pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
    pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
    @@ -1413,49 +1414,9 @@ static void __init xen_boot_params_init_edd(void)
    * Set up the GDT and segment registers for -fstack-protector. Until
    * we do this, we have to be careful not to call any stack-protected
    * function, which is most of the kernel.
    - *
    - * Note, that it is __ref because the only caller of this after init
    - * is PVH which is not going to use xen_load_gdt_boot or other
    - * __init functions.
    */
    -static void __ref xen_setup_gdt(int cpu)
    +static void xen_setup_gdt(int cpu)
    {
    - if (xen_feature(XENFEAT_auto_translated_physmap)) {
    -#ifdef CONFIG_X86_64
    - unsigned long dummy;
    -
    - load_percpu_segment(cpu); /* We need to access per-cpu area */
    - switch_to_new_gdt(cpu); /* GDT and GS set */
    -
    - /* We are switching of the Xen provided GDT to our HVM mode
    - * GDT. The new GDT has __KERNEL_CS with CS.L = 1
    - * and we are jumping to reload it.
    - */
    - asm volatile ("pushq %0\n"
    - "leaq 1f(%%rip),%0\n"
    - "pushq %0\n"
    - "lretq\n"
    - "1:\n"
    - : "=&r" (dummy) : "0" (__KERNEL_CS));
    -
    - /*
    - * While not needed, we also set the %es, %ds, and %fs
    - * to zero. We don't care about %ss as it is NULL.
    - * Strictly speaking this is not needed as Xen zeros those
    - * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE)
    - *
    - * Linux zeros them in cpu_init() and in secondary_startup_64
    - * (for BSP).
    - */
    - loadsegment(es, 0);
    - loadsegment(ds, 0);
    - loadsegment(fs, 0);
    -#else
    - /* PVH: TODO Implement. */
    - BUG();
    -#endif
    - return; /* PVH does not need any PV GDT ops. */
    - }
    pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
    pv_cpu_ops.load_gdt = xen_load_gdt_boot;

    @@ -1466,59 +1427,6 @@ static void __ref xen_setup_gdt(int cpu)
    pv_cpu_ops.load_gdt = xen_load_gdt;
    }

    -#ifdef CONFIG_XEN_PVH
    -/*
    - * A PV guest starts with default flags that are not set for PVH, set them
    - * here asap.
    - */
    -static void xen_pvh_set_cr_flags(int cpu)
    -{
    -
    - /* Some of these are setup in 'secondary_startup_64'. The others:
    - * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
    - * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
    - write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
    -
    - if (!cpu)
    - return;
    - /*
    - * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
    - * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
    - */
    - if (boot_cpu_has(X86_FEATURE_PSE))
    - cr4_set_bits_and_update_boot(X86_CR4_PSE);
    -
    - if (boot_cpu_has(X86_FEATURE_PGE))
    - cr4_set_bits_and_update_boot(X86_CR4_PGE);
    -}
    -
    -/*
    - * Note, that it is ref - because the only caller of this after init
    - * is PVH which is not going to use xen_load_gdt_boot or other
    - * __init functions.
    - */
    -void __ref xen_pvh_secondary_vcpu_init(int cpu)
    -{
    - xen_setup_gdt(cpu);
    - xen_pvh_set_cr_flags(cpu);
    -}
    -
    -static void __init xen_pvh_early_guest_init(void)
    -{
    - if (!xen_feature(XENFEAT_auto_translated_physmap))
    - return;
    -
    - BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
    -
    - xen_pvh_early_cpu_init(0, false);
    - xen_pvh_set_cr_flags(0);
    -
    -#ifdef CONFIG_X86_32
    - BUG(); /* PVH: Implement proper support. */
    -#endif
    -}
    -#endif /* CONFIG_XEN_PVH */
    -
    static void __init xen_dom0_set_legacy_features(void)
    {
    x86_platform.legacy.rtc = 1;
    @@ -1555,24 +1463,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
    xen_domain_type = XEN_PV_DOMAIN;

    xen_setup_features();
    -#ifdef CONFIG_XEN_PVH
    - xen_pvh_early_guest_init();
    -#endif
    +
    xen_setup_machphys_mapping();

    /* Install Xen paravirt ops */
    pv_info = xen_info;
    pv_init_ops = xen_init_ops;
    - if (!xen_pvh_domain()) {
    - pv_cpu_ops = xen_cpu_ops;
    + pv_cpu_ops = xen_cpu_ops;

    - x86_platform.get_nmi_reason = xen_get_nmi_reason;
    - }
    + x86_platform.get_nmi_reason = xen_get_nmi_reason;

    - if (xen_feature(XENFEAT_auto_translated_physmap))
    - x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
    - else
    - x86_init.resources.memory_setup = xen_memory_setup;
    + x86_init.resources.memory_setup = xen_memory_setup;
    x86_init.oem.arch_setup = xen_arch_setup;
    x86_init.oem.banner = xen_banner;

    @@ -1665,18 +1566,15 @@ asmlinkage __visible void __init xen_start_kernel(void)
    /* set the limit of our address space */
    xen_reserve_top();

    - /* PVH: runs at default kernel iopl of 0 */
    - if (!xen_pvh_domain()) {
    - /*
    - * We used to do this in xen_arch_setup, but that is too late
    - * on AMD were early_cpu_init (run before ->arch_setup()) calls
    - * early_amd_init which pokes 0xcf8 port.
    - */
    - set_iopl.iopl = 1;
    - rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
    - if (rc != 0)
    - xen_raw_printk("physdev_op failed %d\n", rc);
    - }
    + /*
    + * We used to do this in xen_arch_setup, but that is too late
    + * on AMD were early_cpu_init (run before ->arch_setup()) calls
    + * early_amd_init which pokes 0xcf8 port.
    + */
    + set_iopl.iopl = 1;
    + rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
    + if (rc != 0)
    + xen_raw_printk("physdev_op failed %d\n", rc);

    #ifdef CONFIG_X86_32
    /* set up basic CPUID stuff */
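
    With the PVH branch gone, the IOPL hypercall near the end of
    xen_start_kernel() now runs unconditionally for PV guests. Pulling the
    hunk's pieces together, the post-patch sequence is roughly the sketch
    below; the surrounding kernel declarations are assumed:

        struct physdev_set_iopl set_iopl;
        int rc;

        /*
         * Raise IOPL early: on AMD, early_cpu_init() (which runs before
         * ->arch_setup()) already pokes I/O port 0xcf8, so waiting until
         * xen_arch_setup() would be too late.
         */
        set_iopl.iopl = 1;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
        if (rc != 0)
                xen_raw_printk("physdev_op failed %d\n", rc);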
    diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
    index 7d5afdb..f6740b5 100644
    --- a/arch/x86/xen/mmu.c
    +++ b/arch/x86/xen/mmu.c
    @@ -1792,10 +1792,6 @@ static void __init set_page_prot_flags(void *addr, pgprot_t prot,
    unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
    pte_t pte = pfn_pte(pfn, prot);

    - /* For PVH no need to set R/O or R/W to pin them or unpin them. */
    - if (xen_feature(XENFEAT_auto_translated_physmap))
    - return;
    -
    if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
    BUG();
    }
    @@ -1902,8 +1898,7 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
    * level2_ident_pgt, and level2_kernel_pgt. This means that only the
    * kernel has a physical mapping to start with - but that's enough to
    * get __va working. We need to fill in the rest of the physical
    - * mapping once some sort of allocator has been set up. NOTE: for
    - * PVH, the page tables are native.
    + * mapping once some sort of allocator has been set up.
    */
    void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
    {
    @@ -2812,16 +2807,6 @@ static int do_remap_gfn(struct vm_area_struct *vma,

    BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

    - if (xen_feature(XENFEAT_auto_translated_physmap)) {
    -#ifdef CONFIG_XEN_PVH
    - /* We need to update the local page tables and the xen HAP */
    - return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
    - prot, domid, pages);
    -#else
    - return -EINVAL;
    -#endif
    - }
    -
    rmd.mfn = gfn;
    rmd.prot = prot;
    /* We use the err_ptr to indicate if there we are doing a contiguous
    @@ -2915,10 +2900,6 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
    if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
    return 0;

    -#ifdef CONFIG_XEN_PVH
    - return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
    -#else
    return -EINVAL;
    -#endif
    }
    EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
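
    Putting the last mmu.c hunk together, the resulting function
    (reconstructed here from the hunk's context lines, so treat it as a
    sketch rather than the exact tree contents) becomes:

        int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                                       int numpgs, struct page **pages)
        {
                if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
                        return 0;

                /* No auto-translated (PVH/xlate) support left to undo. */
                return -EINVAL;
        }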
    diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
    index f3f7b41..a8c306c 100644
    --- a/arch/x86/xen/setup.c
    +++ b/arch/x86/xen/setup.c
    @@ -915,39 +915,6 @@ char * __init xen_memory_setup(void)
    }

    /*
    - * Machine specific memory setup for auto-translated guests.
    - */
    -char * __init xen_auto_xlated_memory_setup(void)
    -{
    - struct xen_memory_map memmap;
    - int i;
    - int rc;
    -
    - memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
    - set_xen_guest_handle(memmap.buffer, xen_e820_map);
    -
    - rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
    - if (rc < 0)
    - panic("No memory map (%d)\n", rc);
    -
    - xen_e820_map_entries = memmap.nr_entries;
    -
    - sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
    - &xen_e820_map_entries);
    -
    - for (i = 0; i < xen_e820_map_entries; i++)
    - e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
    - xen_e820_map[i].type);
    -
    - /* Remove p2m info, it is not needed. */
    - xen_start_info->mfn_list = 0;
    - xen_start_info->first_p2m_pfn = 0;
    - xen_start_info->nr_p2m_frames = 0;
    -
    - return "Xen";
    -}
    -
    -/*
    * Set the bit indicating "nosegneg" library variants should be used.
    * We only need to bother in pure 32-bit mode; compat 32-bit processes
    * can have un-truncated segments, so wrapping around is allowed.
    @@ -1032,8 +999,8 @@ void __init xen_pvmmu_arch_setup(void)
    void __init xen_arch_setup(void)
    {
    xen_panic_handler_init();
    - if (!xen_feature(XENFEAT_auto_translated_physmap))
    - xen_pvmmu_arch_setup();
    +
    + xen_pvmmu_arch_setup();

    #ifdef CONFIG_ACPI
    if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
    diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
    index 311acad..0dee6f5 100644
    --- a/arch/x86/xen/smp.c
    +++ b/arch/x86/xen/smp.c
    @@ -99,18 +99,8 @@ static void cpu_bringup(void)
    local_irq_enable();
    }

    -/*
    - * Note: cpu parameter is only relevant for PVH. The reason for passing it
    - * is we can't do smp_processor_id until the percpu segments are loaded, for
    - * which we need the cpu number! So we pass it in rdi as first parameter.
    - */
    -asmlinkage __visible void cpu_bringup_and_idle(int cpu)
    +asmlinkage __visible void cpu_bringup_and_idle(void)
    {
    -#ifdef CONFIG_XEN_PVH
    - if (xen_feature(XENFEAT_auto_translated_physmap) &&
    - xen_feature(XENFEAT_supervisor_mode_kernel))
    - xen_pvh_secondary_vcpu_init(cpu);
    -#endif
    cpu_bringup();
    cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
    }
    @@ -404,61 +394,47 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
    gdt = get_cpu_gdt_table(cpu);

    #ifdef CONFIG_X86_32
    - /* Note: PVH is not yet supported on x86_32. */
    ctxt->user_regs.fs = __KERNEL_PERCPU;
    ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
    #endif
    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    - if (!xen_feature(XENFEAT_auto_translated_physmap)) {
    - ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
    - ctxt->flags = VGCF_IN_KERNEL;
    - ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
    - ctxt->user_regs.ds = __USER_DS;
    - ctxt->user_regs.es = __USER_DS;
    - ctxt->user_regs.ss = __KERNEL_DS;
    + ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
    + ctxt->flags = VGCF_IN_KERNEL;
    + ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
    + ctxt->user_regs.ds = __USER_DS;
    + ctxt->user_regs.es = __USER_DS;
    + ctxt->user_regs.ss = __KERNEL_DS;

    - xen_copy_trap_info(ctxt->trap_ctxt);
    + xen_copy_trap_info(ctxt->trap_ctxt);

    - ctxt->ldt_ents = 0;
    + ctxt->ldt_ents = 0;

    - BUG_ON((unsigned long)gdt & ~PAGE_MASK);
    + BUG_ON((unsigned long)gdt & ~PAGE_MASK);

    - gdt_mfn = arbitrary_virt_to_mfn(gdt);
    - make_lowmem_page_readonly(gdt);
    - make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
    + gdt_mfn = arbitrary_virt_to_mfn(gdt);
    + make_lowmem_page_readonly(gdt);
    + make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

    - ctxt->gdt_frames[0] = gdt_mfn;
    - ctxt->gdt_ents = GDT_ENTRIES;
    + ctxt->gdt_frames[0] = gdt_mfn;
    + ctxt->gdt_ents = GDT_ENTRIES;

    - ctxt->kernel_ss = __KERNEL_DS;
    - ctxt->kernel_sp = idle->thread.sp0;
    + ctxt->kernel_ss = __KERNEL_DS;
    + ctxt->kernel_sp = idle->thread.sp0;

    #ifdef CONFIG_X86_32
    - ctxt->event_callback_cs = __KERNEL_CS;
    - ctxt->failsafe_callback_cs = __KERNEL_CS;
    + ctxt->event_callback_cs = __KERNEL_CS;
    + ctxt->failsafe_callback_cs = __KERNEL_CS;
    #else
    - ctxt->gs_base_kernel = per_cpu_offset(cpu);
    -#endif
    - ctxt->event_callback_eip =
    - (unsigned long)xen_hypervisor_callback;
    - ctxt->failsafe_callback_eip =
    - (unsigned long)xen_failsafe_callback;
    - ctxt->user_regs.cs = __KERNEL_CS;
    - per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
    - }
    -#ifdef CONFIG_XEN_PVH
    - else {
    - /*
    - * The vcpu comes on kernel page tables which have the NX pte
    - * bit set. This means before DS/SS is touched, NX in
    - * EFER must be set. Hence the following assembly glue code.
    - */
    - ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
    - ctxt->user_regs.rdi = cpu;
    - ctxt->user_regs.rsi = true; /* entry == true */
    - }
    + ctxt->gs_base_kernel = per_cpu_offset(cpu);
    #endif
    + ctxt->event_callback_eip =
    + (unsigned long)xen_hypervisor_callback;
    + ctxt->failsafe_callback_eip =
    + (unsigned long)xen_failsafe_callback;
    + ctxt->user_regs.cs = __KERNEL_CS;
    + per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
    +
    ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
    ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
    if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
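
    For reference, the cpu argument to cpu_bringup_and_idle() existed only
    because the PVH entry path could not use smp_processor_id() before the
    per-cpu segments were loaded, so the CPU number had to arrive in %rdi.
    With that path removed, the entry point shrinks to what the first smp.c
    hunk shows:

        asmlinkage __visible void cpu_bringup_and_idle(void)
        {
                cpu_bringup();
                cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
        }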
    diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
    index c5c16dc..9beef33 100644
    --- a/arch/x86/xen/smp.h
    +++ b/arch/x86/xen/smp.h
    @@ -21,12 +21,4 @@ static inline int xen_smp_intr_init(unsigned int cpu)
    static inline void xen_smp_intr_free(unsigned int cpu) {}
    #endif /* CONFIG_SMP */

    -#ifdef CONFIG_XEN_PVH
    -extern void xen_pvh_early_cpu_init(int cpu, bool entry);
    -#else
    -static inline void xen_pvh_early_cpu_init(int cpu, bool entry)
    -{
    -}
    -#endif
    -
    #endif
    diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
    index 7f8d8ab..37794e4 100644
    --- a/arch/x86/xen/xen-head.S
    +++ b/arch/x86/xen/xen-head.S
    @@ -16,25 +16,6 @@
    #include <xen/interface/xen-mca.h>
    #include <asm/xen/interface.h>

    -#ifdef CONFIG_XEN_PVH
    -#define PVH_FEATURES_STR "|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel"
    -/* Note the lack of 'hvm_callback_vector'. Older hypervisor will
    - * balk at this being part of XEN_ELFNOTE_FEATURES, so we put it in
    - * XEN_ELFNOTE_SUPPORTED_FEATURES which older hypervisors will ignore.
    - */
    -#define PVH_FEATURES ((1 << XENFEAT_writable_page_tables) | \
    - (1 << XENFEAT_auto_translated_physmap) | \
    - (1 << XENFEAT_supervisor_mode_kernel) | \
    - (1 << XENFEAT_hvm_callback_vector))
    -/* The XENFEAT_writable_page_tables is not stricly necessary as we set that
    - * up regardless whether this CONFIG option is enabled or not, but it
    - * clarifies what the right flags need to be.
    - */
    -#else
    -#define PVH_FEATURES_STR ""
    -#define PVH_FEATURES (0)
    -#endif
    -
    __INIT
    ENTRY(startup_xen)
    cld
    @@ -54,41 +35,6 @@ ENTRY(startup_xen)

    __FINIT

    -#ifdef CONFIG_XEN_PVH
    -/*
    - * xen_pvh_early_cpu_init() - early PVH VCPU initialization
    - * @cpu: this cpu number (%rdi)
    - * @entry: true if this is a secondary vcpu coming up on this entry
    - * point, false if this is the boot CPU being initialized for
    - * the first time (%rsi)
    - *
    - * Note: This is called as a function on the boot CPU, and is the entry point
    - * on the secondary CPU.
    - */
    -ENTRY(xen_pvh_early_cpu_init)
    - mov %rsi, %r11
    -
    - /* Gather features to see if NX implemented. */
    - mov $0x80000001, %eax
    - cpuid
    - mov %edx, %esi
    -
    - mov $MSR_EFER, %ecx
    - rdmsr
    - bts $_EFER_SCE, %eax
    -
    - bt $20, %esi
    - jnc 1f /* No NX, skip setting it */
    - bts $_EFER_NX, %eax
    -1: wrmsr
    -#ifdef CONFIG_SMP
    - cmp $0, %r11b
    - jne cpu_bringup_and_idle
    -#endif
    - ret
    -
    -#endif /* CONFIG_XEN_PVH */
    -
    .pushsection .text
    .balign PAGE_SIZE
    ENTRY(hypercall_page)
    @@ -114,10 +60,10 @@ ENTRY(hypercall_page)
    #endif
    ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
    ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
    - ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables|pae_pgdir_above_4gb"; .asciz PVH_FEATURES_STR)
    - ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, .long (PVH_FEATURES) |
    - (1 << XENFEAT_writable_page_tables) |
    - (1 << XENFEAT_dom0))
    + ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
    + .ascii "!writable_page_tables|pae_pgdir_above_4gb")
    + ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
    + .long (1 << XENFEAT_writable_page_tables) | (1 << XENFEAT_dom0))
    ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
    ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
    ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
    diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
    index ac0a2b0..f6a41c4 100644
    --- a/arch/x86/xen/xen-ops.h
    +++ b/arch/x86/xen/xen-ops.h
    @@ -146,5 +146,4 @@ static inline void __init xen_efi_init(void)

    extern int xen_panic_handler_init(void);

    -void xen_pvh_secondary_vcpu_init(int cpu);
    #endif /* XEN_OPS_H */
    diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
    index fd8e872..6a53577 100644
    --- a/drivers/xen/events/events_base.c
    +++ b/drivers/xen/events/events_base.c
    @@ -1704,7 +1704,6 @@ void __init xen_init_IRQ(void)
    pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
    eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
    rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
    - /* TODO: No PVH support for PIRQ EOI */
    if (rc != 0) {
    free_page((unsigned long) pirq_eoi_map);
    pirq_eoi_map = NULL;
    diff --git a/include/xen/xen.h b/include/xen/xen.h
    index f0f0252..d0f9684 100644
    --- a/include/xen/xen.h
    +++ b/include/xen/xen.h
    @@ -29,17 +29,6 @@ enum xen_domain_type {
    #define xen_initial_domain() (0)
    #endif /* CONFIG_XEN_DOM0 */

    -#ifdef CONFIG_XEN_PVH
    -/* This functionality exists only for x86. The XEN_PVHVM support exists
    - * only in x86 world - hence on ARM it will be always disabled.
    - * N.B. ARM guests are neither PV nor HVM nor PVHVM.
    - * It's a bit like PVH but is different also (it's further towards the H
    - * end of the spectrum than even PVH).
    - */
    -#include <xen/features.h>
    -#define xen_pvh_domain() (xen_pv_domain() && \
    - xen_feature(XENFEAT_auto_translated_physmap))
    -#else
    #define xen_pvh_domain() (0)
    -#endif
    +
    #endif /* _XEN_XEN_H */
    --
    1.8.3.1
