Commit 2bebd2d

Automatic merge of 'next-test' into merge-test (2023-10-19 23:30)

mpe committed Oct 19, 2023
2 parents 1635a0c + efce842

Showing 66 changed files with 661 additions and 824 deletions.
83 changes: 31 additions & 52 deletions arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -20,15 +20,15 @@

#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
#define _PAGE_USER 0x004 /* usermode access allowed */
#define _PAGE_READ 0x004 /* software: read access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
#define _PAGE_RW 0x400 /* software: user write access allowed */
#define _PAGE_WRITE 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */

#ifdef CONFIG_PTE_64BIT
@@ -42,26 +42,13 @@
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)

/* We borrow the _PAGE_USER bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE _PAGE_USER
/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE _PAGE_READ
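
Note: this marker is consumed by the swap-PTE helpers further down in this file; the diff below only shows pte_swp_clear_exclusive(), so the set/test pair is assumed here, sketched on the usual pattern:

/* Presumed shape of the companion helpers (not shown in this diff),
 * assuming they simply set/test _PAGE_SWP_EXCLUSIVE in the swap PTE.
 */
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}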

/* And here we include common definitions */

#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
* Location of the PFN in the PTE. Most 32-bit platforms use the same
* as _PAGE_SHIFT here (ie, naturally aligned).
@@ -97,20 +84,7 @@ static inline bool pte_user(pte_t pte)
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)

/*
* Permission masks used to generate the __P and __S table.
*
* Note:__pgprot is defined in arch/powerpc/include/asm/page.h
*
* Write permissions imply read permissions for now.
*/
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#include <asm/pgtable-masks.h>

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
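
Note: the 32-bit hunk above and the 64-bit hunk below both replace the per-variant PAGE_NONE/PAGE_SHARED/... definitions with a shared asm/pgtable-masks.h. Its exact contents are not part of this diff; the sketch below shows the kind of definitions it presumably centralizes, built on the _PAGE_NA/_PAGE_RO/_PAGE_RW aliases visible in the 64-bit hunk:

/* Sketch only: assumed shape of asm/pgtable-masks.h. Each pgtable
 * variant provides _PAGE_NA/_PAGE_RO/_PAGE_RW/... before including it.
 */
#define PAGE_NONE       __pgprot(_PAGE_BASE | _PAGE_NA)
#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_RWX)
#define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_RO)
#define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_ROX)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_RO)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_ROX)
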
@@ -170,7 +144,14 @@ void unmap_kernel_page(unsigned long va);
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
#include <asm/fixmap.h>

#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif

/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -224,9 +205,6 @@ void unmap_kernel_page(unsigned long va);
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0

#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
@@ -343,7 +321,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
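
Note: pte_update() clears the bits in its fourth argument and sets those in its fifth, atomically with respect to concurrent hardware updates of the PTE. A conceptual, non-atomic model (sketch only; the real helper also takes the huge-page argument and uses atomics):

/* Conceptual, non-atomic model of pte_update(ptep, clr, set). */
static inline unsigned long pte_update_model(pte_t *ptep,
                                             unsigned long clr,
                                             unsigned long set)
{
        unsigned long old = pte_val(*ptep);

        *ptep = __pte((old & ~clr) | set);
        return old;
}

So ptep_set_wrprotect() above simply clears _PAGE_WRITE and leaves every other bit untouched.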

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -402,8 +380,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
}

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte) { return 1; }
static inline bool pte_read(pte_t pte)
{
return !!(pte_val(pte) & _PAGE_READ);
}

static inline bool pte_write(pte_t pte)
{
return !!(pte_val(pte) & _PAGE_WRITE);
}

static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
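
Note: pte_read() previously returned a hard-coded 1; with a real _PAGE_READ bit it can now report PTEs that are present but not readable. A usage sketch:

/* Sketch: a present PTE without _PAGE_READ (e.g. access-protected)
 * now fails a read-permission check instead of silently passing it.
 */
static bool pte_readable(pte_t pte)
{
        return pte_present(pte) && pte_read(pte);
}
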
@@ -438,10 +424,10 @@ static inline bool pte_ci(pte_t pte)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
/*
* A read-only access is controlled by _PAGE_USER bit.
* We have _PAGE_READ set for WRITE and EXECUTE
* A read-only access is controlled by _PAGE_READ bit.
* We have _PAGE_READ set for WRITE
*/
if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
if (!pte_present(pte) || !pte_read(pte))
return false;

if (write && !pte_write(pte))
@@ -465,7 +451,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_RW);
return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_exprotect(pte_t pte)
@@ -495,6 +481,9 @@ static inline pte_t pte_mkpte(pte_t pte)

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
/*
* write implies read, hence set both
*/
return __pte(pte_val(pte) | _PAGE_RW);
}
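
Note: _PAGE_RW is now the composite (_PAGE_READ | _PAGE_WRITE), per the 64-bit header below and, presumably, the same alias from asm/pgtable-masks.h on the 32-bit side, so the single OR above grants both permissions:

/* Assumes _PAGE_RW == (_PAGE_READ | _PAGE_WRITE): the result of
 * pte_mkwrite_novma() then satisfies pte_read() and pte_write().
 */
static void assert_write_implies_read(pte_t pte)
{
        pte = pte_mkwrite_novma(pte);
        WARN_ON(!pte_read(pte) || !pte_write(pte));
}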

@@ -518,16 +507,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
37 changes: 7 additions & 30 deletions arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -17,6 +17,10 @@
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
#define _PAGE_NA _PAGE_PRIVILEGED
#define _PAGE_NAX _PAGE_EXEC
#define _PAGE_RO _PAGE_READ
#define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
@@ -136,23 +140,7 @@
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)

/* Permission masks used to generate the __P and __S table,
*
* Note:__pgprot is defined in arch/powerpc/include/asm/page.h
*
* Write permissions imply read permissions for now (we could make write-only
* pages on BookE but we don't bother for now). Execute permission control is
* possible on platforms that define _PAGE_EXEC
*/
#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
#include <asm/pgtable-masks.h>

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -316,6 +304,7 @@ extern unsigned long pci_io_base;
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)

#ifndef __ASSEMBLY__

@@ -629,16 +618,6 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
}

static inline pte_t pte_mkuser(pte_t pte)
{
return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}

/*
* This is potentially called with a pmd as the argument, in which case it's not
* safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
@@ -647,7 +626,7 @@ static inline pte_t pte_mkuser(pte_t pte)
*/
static inline int pte_devmap(pte_t pte)
{
u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
__be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);

return (pte_raw(pte) & mask) == mask;
}
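
Note: on book3s/64, pte_raw() yields the raw big-endian PTE image (__be64), so any mask compared against it must be __be64 as well; a plain u64, as before this change, type-checks incorrectly under sparse. The pattern generalizes:

/* Sketch: testing several raw PTE bits at once, endian-correctly. */
static inline bool pte_raw_has_all(pte_t pte, u64 bits)
{
        __be64 mask = cpu_to_be64(bits);

        return (pte_raw(pte) & mask) == mask;
}
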
@@ -1014,8 +993,6 @@ static inline pmd_t *pud_pgtable(pud_t pud)
return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
}

#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
33 changes: 0 additions & 33 deletions arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,37 +8,4 @@
#include <asm/book3s/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, pte_t entry, int dirty);

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
* We use it to ensure coherency between the i-cache and d-cache
* for the page which has just been mapped in.
* On machines which use an MMU hash table, we use this to put a
* corresponding HPTE into the hash table ahead of time, instead of
* waiting for the inevitable extra hash-table miss exception.
*/
static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
return;
if (radix_enabled())
return;
__update_mmu_cache(vma, address, ptep);
}

#endif /* __ASSEMBLY__ */
#endif
16 changes: 4 additions & 12 deletions arch/powerpc/include/asm/fixmap.h
@@ -23,18 +23,6 @@
#include <asm/kmap_size.h>
#endif

#ifdef CONFIG_PPC64
#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
#else
#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
#endif

/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
@@ -119,5 +107,9 @@ static inline void __set_fixmap(enum fixed_addresses idx,

#define __early_set_fixmap __set_fixmap

#ifdef CONFIG_PPC_8xx
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
#endif

#endif /* !__ASSEMBLY__ */
#endif
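
Note: with FIXADDR_TOP now supplied by each pgtable.h (see the hunks above), the fixmap arithmetic itself is unchanged; slots still resolve downward from the top of the fixmap area, per the usual asm-generic/fixmap.h convention that VIRT_IMMR_BASE relies on:

/* Generic fixmap convention (asm-generic/fixmap.h): slot x sits x
 * pages below FIXADDR_TOP.
 */
#define __fix_to_virt(x)        (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)        ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
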
16 changes: 8 additions & 8 deletions arch/powerpc/include/asm/imc-pmu.h
@@ -74,14 +74,14 @@ struct imc_events {
* The following is the data structure to hold trace imc data.
*/
struct trace_imc_data {
u64 tb1;
u64 ip;
u64 val;
u64 cpmc1;
u64 cpmc2;
u64 cpmc3;
u64 cpmc4;
u64 tb2;
__be64 tb1;
__be64 ip;
__be64 val;
__be64 cpmc1;
__be64 cpmc2;
__be64 cpmc3;
__be64 cpmc4;
__be64 tb2;
};
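
Note: the __be64 annotations record that the trace buffer is stored in big-endian byte order regardless of host endianness, so consumers must convert explicitly. A reader sketch (function name illustrative only):

/* Sketch: pulling one field out of a trace IMC record on any host. */
static u64 trace_imc_timebase(const struct trace_imc_data *rec)
{
        return be64_to_cpu(READ_ONCE(rec->tb1));
}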

/* Event attribute array index */
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/io.h
@@ -950,7 +950,7 @@ extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline unsigned long virt_to_phys(volatile void * address)
static inline unsigned long virt_to_phys(const volatile void * address)
{
WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !virt_addr_valid(address));

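Note: the parameter gains a const qualifier, so callers that only hold a const pointer no longer need to cast it away:

/* Sketch: passing a const pointer straight through now compiles
 * cleanly (identifiers here are illustrative only).
 */
static unsigned long table_phys(const void *table)
{
        return virt_to_phys(table);
}
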
1 change: 0 additions & 1 deletion arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -188,7 +188,6 @@ typedef struct {
} mm_context_t;

#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))

/* Page size definitions, common between 32 and 64-bit
*