Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
"Lots of changes in this cycle:
- Lots of CPA (change page attribute) optimizations and related
cleanups (Thomas Gleixner, Peter Zijlstra)
- Make lazy TLB mode even lazier (Rik van Riel)
- Fault handler cleanups and improvements (Dave Hansen)
- kdump, vmcore: Enable kdumping encrypted memory with AMD SME
enabled (Lianbo Jiang)
- Clean up VM layout documentation (Baoquan He, Ingo Molnar)
- ... plus misc other fixes and enhancements"
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
x86/stackprotector: Remove the call to boot_init_stack_canary() from cpu_startup_entry()
x86/mm: Kill stray kernel fault handling comment
x86/mm: Do not warn about PCI BIOS W+X mappings
resource: Clean it up a bit
resource: Fix find_next_iomem_res() iteration issue
resource: Include resource end in walk_*() interfaces
x86/kexec: Correct KEXEC_BACKUP_SRC_END off-by-one error
x86/mm: Remove spurious fault pkey check
x86/mm/vsyscall: Consider vsyscall page part of user address space
x86/mm: Add vsyscall address helper
x86/mm: Fix exception table comments
x86/mm: Add clarifying comments for user addr space
x86/mm: Break out user address space handling
x86/mm: Break out kernel address space handling
x86/mm: Clarify hardware vs. software "error_code"
x86/mm/tlb: Make lazy TLB mode lazier
x86/mm/tlb: Add freed_tables element to flush_tlb_info
x86/mm/tlb: Add freed_tables argument to flush_tlb_mm_range
smp,cpumask: introduce on_each_cpu_cond_mask
smp: use __cpumask_set_cpu in on_each_cpu_cond
...
fs/proc/vmcore.c

@@ -24,6 +24,8 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/mem_encrypt.h>
+#include <asm/pgtable.h>
 #include <asm/io.h>
 #include "internal.h"
 
@@ -98,7 +100,8 @@ static int pfn_is_ram(unsigned long pfn)
 
 /* Reads a page from the oldmem device from given offset. */
 static ssize_t read_from_oldmem(char *buf, size_t count,
-                                u64 *ppos, int userbuf)
+                                u64 *ppos, int userbuf,
+                                bool encrypted)
 {
         unsigned long pfn, offset;
         size_t nr_bytes;
@@ -120,8 +123,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
                 if (pfn_is_ram(pfn) == 0)
                         memset(buf, 0, nr_bytes);
                 else {
-                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
-                                               offset, userbuf);
+                        if (encrypted)
+                                tmp = copy_oldmem_page_encrypted(pfn, buf,
+                                                                 nr_bytes,
+                                                                 offset,
+                                                                 userbuf);
+                        else
+                                tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+                                                       offset, userbuf);
+
                         if (tmp < 0)
                                 return tmp;
                 }
@@ -155,7 +165,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
  */
 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-        return read_from_oldmem(buf, count, ppos, 0);
+        return read_from_oldmem(buf, count, ppos, 0, false);
 }
 
 /*
@@ -163,7 +173,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
  */
 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 {
-        return read_from_oldmem(buf, count, ppos, 0);
+        return read_from_oldmem(buf, count, ppos, 0, sme_active());
 }
 
 /*
@@ -173,9 +183,20 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
 {
+        prot = pgprot_encrypted(prot);
         return remap_pfn_range(vma, from, pfn, size, prot);
 }
 
+/*
+ * Architectures which support memory encryption override this.
+ */
+ssize_t __weak
+copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+                           unsigned long offset, int userbuf)
+{
+        return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
+}
+
 /*
  * Copy to either kernel or user space
  */
@@ -351,7 +372,8 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                                             m->offset + m->size - *fpos,
                                             buflen);
                         start = m->paddr + *fpos - m->offset;
-                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
+                        tmp = read_from_oldmem(buffer, tsz, &start,
+                                               userbuf, sme_active());
                         if (tmp < 0)
                                 return tmp;
                         buflen -= tsz;
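
For context only, and not part of the patch above: the new __weak copy_oldmem_page_encrypted() is meant to be overridden by architectures that support memory encryption. Below is a minimal sketch of what such an override could look like, assuming the architecture provides an ioremap_encrypted() helper (the x86 side of this series adds one); the actual arch implementation may differ in detail.

/*
 * Illustrative sketch of an architecture override of
 * copy_oldmem_page_encrypted(): map the old kernel's page through an
 * encrypted mapping so the hardware decrypts it on read, then copy it
 * to a kernel or user buffer.  Assumes ioremap_encrypted() exists.
 */
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uaccess.h>

ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
                                   unsigned long offset, int userbuf)
{
        void *vaddr;

        if (!csize)
                return 0;

        /* Map the old memory page with the encryption bit set. */
        vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!vaddr)
                return -ENOMEM;

        if (userbuf) {
                /* Copy straight to the user buffer passed via read(). */
                if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
                        iounmap((void __iomem *)vaddr);
                        return -EFAULT;
                }
        } else {
                memcpy(buf, vaddr + offset, csize);
        }

        iounmap((void __iomem *)vaddr);
        return csize;
}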