Commit 79391a2

Automatic merge of 'next-test' into merge-test (2023-10-25 16:10)

mpe committed Oct 25, 2023
2 parents 8820fc9 + 36e826b
Showing 86 changed files with 1,058 additions and 1,018 deletions.
1 change: 1 addition & 0 deletions arch/powerpc/Kconfig
@@ -237,6 +237,7 @@ config PPC
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_FAST_GUP
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
 	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_FUNCTION_GRAPH_TRACER
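Context for this select (not text from the commit): HAVE_FUNCTION_ARG_ACCESS_API advertises that the architecture implements regs_get_kernel_argument(), which lets kprobe-style tracers read a probed function's arguments out of a saved pt_regs. A minimal sketch of a consumer, with a hypothetical probe target:

#include <linux/kprobes.h>

/* Hypothetical probe: log the first argument of the probed function.
 * regs_get_kernel_argument() is the accessor this Kconfig symbol gates. */
static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg0 = regs_get_kernel_argument(regs, 0);

	pr_info("%s: arg0=%#lx\n", p->symbol_name, arg0);
	return 0;	/* let execution continue normally */
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_openat2",	/* example target, not from the commit */
	.pre_handler	= pre_handler,
};

A module would arm this with register_kprobe(&kp); the probed symbol above is only an example.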
16 changes: 10 additions & 6 deletions arch/powerpc/boot/install.sh
@@ -21,13 +21,17 @@ set -e
 # this should work for both the pSeries zImage and the iSeries vmlinux.sm
 image_name=`basename $2`
 
-if [ -f $4/$image_name ]; then
-	mv $4/$image_name $4/$image_name.old
+
+echo "Warning: '${INSTALLKERNEL}' command not available... Copying" \
+	"directly to $4/$image_name-$1" >&2
+
+if [ -f $4/$image_name-$1 ]; then
+	mv $4/$image_name-$1 $4/$image_name-$1.old
 fi
 
-if [ -f $4/System.map ]; then
-	mv $4/System.map $4/System.old
+if [ -f $4/System.map-$1 ]; then
+	mv $4/System.map-$1 $4/System-$1.old
 fi
 
-cat $2 > $4/$image_name
-cp $3 $4/System.map
+cat $2 > $4/$image_name-$1
+cp $3 $4/System.map-$1
83 changes: 31 additions & 52 deletions arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -20,15 +20,15 @@
 
 #define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
 #define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
-#define _PAGE_USER	0x004	/* usermode access allowed */
+#define _PAGE_READ	0x004	/* software: read access allowed */
 #define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
 #define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
 #define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
 #define _PAGE_DIRTY	0x080	/* C: page changed */
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
 #define _PAGE_EXEC	0x200	/* software: exec allowed */
-#define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_WRITE	0x400	/* software: user write access allowed */
 #define _PAGE_SPECIAL	0x800	/* software: Special page */
 
 #ifdef CONFIG_PTE_64BIT
@@ -42,26 +42,13 @@
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)
 
-/* We borrow the _PAGE_USER bit to store the exclusive marker in swap PTEs. */
-#define _PAGE_SWP_EXCLUSIVE	_PAGE_USER
+/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	_PAGE_READ
 
 /* And here we include common definitions */
 
-#define _PAGE_KERNEL_RO		0
-#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
-#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
-#ifndef __ASSEMBLY__
-
-static inline bool pte_user(pte_t pte)
-{
-	return pte_val(pte) & _PAGE_USER;
-}
-#endif /* __ASSEMBLY__ */
-
 /*
  * Location of the PFN in the PTE. Most 32-bit platforms use the same
  * as _PAGE_SHIFT here (ie, naturally aligned).
@@ -97,20 +84,7 @@ static inline bool pte_user(pte_t pte)
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
 
-/*
- * Permission masks used to generate the __P and __S table.
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now.
- */
-#define PAGE_NONE	__pgprot(_PAGE_BASE)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
 
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -170,7 +144,14 @@ void unmap_kernel_page(unsigned long va);
  * value (for now) on others, from where we can start layout kernel
  * virtual space that goes below PKMAP and FIXMAP
  */
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE	0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+#endif
 
 /*
  * ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -224,9 +205,6 @@ void unmap_kernel_page(unsigned long va);
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS		0
 
-#define pte_ERROR(e) \
-	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
-		(unsigned long long)pte_val(e))
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 /*
@@ -343,7 +321,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
 {
-	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 }
 
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -402,8 +380,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 }
 
 /* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
-static inline int pte_read(pte_t pte)		{ return 1; }
+static inline bool pte_read(pte_t pte)
+{
+	return !!(pte_val(pte) & _PAGE_READ);
+}
+
+static inline bool pte_write(pte_t pte)
+{
+	return !!(pte_val(pte) & _PAGE_WRITE);
+}
+
 static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
@@ -438,10 +424,10 @@ static inline bool pte_ci(pte_t pte)
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
 	/*
-	 * A read-only access is controlled by _PAGE_USER bit.
-	 * We have _PAGE_READ set for WRITE and EXECUTE
+	 * A read-only access is controlled by _PAGE_READ bit.
+	 * We have _PAGE_READ set for WRITE
 	 */
-	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+	if (!pte_present(pte) || !pte_read(pte))
 		return false;
 
 	if (write && !pte_write(pte))
@@ -465,7 +451,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_RW);
+	return __pte(pte_val(pte) & ~_PAGE_WRITE);
 }
 
 static inline pte_t pte_exprotect(pte_t pte)
@@ -495,6 +481,9 @@ static inline pte_t pte_mkpte(pte_t pte)
 
 static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
+	/*
+	 * write implies read, hence set both
+	 */
 	return __pte(pte_val(pte) | _PAGE_RW);
 }
 
@@ -518,16 +507,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
-	return __pte(pte_val(pte) & ~_PAGE_USER);
-}
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
-	return __pte(pte_val(pte) | _PAGE_USER);
-}
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
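What the hunks above add up to (my reading, not text from the commit): _PAGE_USER is gone, read permission gets its own software bit, and write-protect now clears only _PAGE_WRITE, while _PAGE_RW survives as a combined read-plus-write mask (per the "write implies read, hence set both" comment, presumably supplied by the new asm/pgtable-masks.h include). A stand-alone model of the new accessors, using the bit values from the first hunk:

#include <stdbool.h>
#include <stdio.h>

/* Bit values copied from the first hunk above. */
#define _PAGE_READ	0x004UL		/* software: read access allowed */
#define _PAGE_WRITE	0x400UL		/* software: write access allowed */
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)	/* assumed pgtable-masks.h value */

static bool pte_read(unsigned long pte)  { return pte & _PAGE_READ; }
static bool pte_write(unsigned long pte) { return pte & _PAGE_WRITE; }

int main(void)
{
	unsigned long pte = _PAGE_READ;	/* a read-only mapping */

	pte |= _PAGE_RW;		/* pte_mkwrite_novma(): "write implies read" */
	pte &= ~_PAGE_WRITE;		/* pte_wrprotect(): clears only the write bit */
	printf("read=%d write=%d\n", pte_read(pte), pte_write(pte));	/* read=1 write=0 */
	return 0;
}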
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -80,7 +80,7 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
 					      unsigned long vmaddr, int psize)
 {
-	BUILD_BUG();
+	flush_range(mm, vmaddr, vmaddr);
 }
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
37 changes: 7 additions & 30 deletions arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -17,6 +17,10 @@
 #define _PAGE_EXEC		0x00001 /* execute permission */
 #define _PAGE_WRITE		0x00002 /* write access allowed */
 #define _PAGE_READ		0x00004	/* read access allowed */
+#define _PAGE_NA		_PAGE_PRIVILEGED
+#define _PAGE_NAX		_PAGE_EXEC
+#define _PAGE_RO		_PAGE_READ
+#define _PAGE_ROX		(_PAGE_READ | _PAGE_EXEC)
 #define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
 #define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
@@ -136,23 +140,7 @@
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE	(_PAGE_BASE_NC)
 
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now (we could make write-only
- * pages on BookE but we don't bother for now). Execute permission control is
- * possible on platforms that define _PAGE_EXEC
- */
-#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
-#define PAGE_EXECONLY	__pgprot(_PAGE_BASE | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
 
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -316,6 +304,7 @@ extern unsigned long pci_io_base;
 #define IOREMAP_START	(ioremap_bot)
 #define IOREMAP_END	(KERN_IO_END - FIXADDR_SIZE)
 #define FIXADDR_SIZE	SZ_32M
+#define FIXADDR_TOP	(IOREMAP_END + FIXADDR_SIZE)
 
 #ifndef __ASSEMBLY__
 
@@ -629,16 +618,6 @@ static inline pte_t pte_mkdevmap(pte_t pte)
 	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
 }
 
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
-	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
-}
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
-	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
-}
-
 /*
  * This is potentially called with a pmd as the argument, in which case it's not
  * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
@@ -647,7 +626,7 @@ static inline pte_t pte_mkuser(pte_t pte)
  */
 static inline int pte_devmap(pte_t pte)
 {
-	u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+	__be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
 
 	return (pte_raw(pte) & mask) == mask;
 }
@@ -1014,8 +993,6 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 	return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
 }
 
-#define pte_ERROR(e) \
-	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
 	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pud_ERROR(e) \
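The pte_devmap() hunk is a type fix rather than a behaviour change: cpu_to_be64() returns a sparse-annotated __be64, matching what pte_raw() yields, so declaring the mask as u64 mixed byte-order annotations. A stand-alone model of the both-bits-set test (bit positions are invented; the real _PAGE_* values live in the header):

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions; not the real _PAGE_PTE/_PAGE_DEVMAP values. */
#define PAGE_PTE	(1ULL << 62)
#define PAGE_DEVMAP	(1ULL << 47)

typedef uint64_t be64;				/* stand-in for the kernel's __be64 */

static be64 to_be64(uint64_t v) { return __builtin_bswap64(v); }	/* LE host assumed */

static int pte_devmap_like(be64 raw)
{
	be64 mask = to_be64(PAGE_DEVMAP | PAGE_PTE);	/* mask in raw-PTE byte order */

	return (raw & mask) == mask;	/* true only if both bits are set */
}

int main(void)
{
	printf("%d\n", pte_devmap_like(to_be64(PAGE_PTE | PAGE_DEVMAP)));	/* 1 */
	printf("%d\n", pte_devmap_like(to_be64(PAGE_DEVMAP)));			/* 0 */
	return 0;
}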
33 changes: 0 additions & 33 deletions arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,37 +8,4 @@
 #include <asm/book3s/32/pgtable.h>
 #endif
 
-#ifndef __ASSEMBLY__
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
-				 pte_t *ptep, pte_t entry, int dirty);
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
-		struct vm_area_struct *vma, unsigned long address,
-		pte_t *ptep, unsigned int nr)
-{
-	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		return;
-	if (radix_enabled())
-		return;
-	__update_mmu_cache(vma, address, ptep);
-}
-
-#endif /* __ASSEMBLY__ */
 #endif
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/code-patching.h
@@ -74,6 +74,7 @@ int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
 int patch_branch(u32 *addr, unsigned long target, int flags);
 int patch_instruction(u32 *addr, ppc_inst_t instr);
 int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
 
 static inline unsigned long patch_site_addr(s32 *site)
 {
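patch_instructions() is the batch counterpart of patch_instruction(): as I read the signature, it writes len bytes of instructions starting at addr, and with repeat_instr=true replicates the single instruction in *code across the whole range. A hedged usage sketch (the destination pointers and sizes are invented):

#include <asm/code-patching.h>
#include <asm/ppc-opcode.h>

/* Invented call site: write two instructions, then pad a second area
 * with a repeated nop rather than looping over patch_instruction(). */
static int example_patch(u32 *dest, u32 *pad_area, size_t pad_len)
{
	u32 insns[2] = { PPC_RAW_NOP(), PPC_RAW_BLR() };
	u32 nop = PPC_RAW_NOP();
	int err;

	err = patch_instructions(dest, insns, sizeof(insns), false);
	if (err)
		return err;

	/* repeat_instr=true: replicate the single nop across pad_len bytes */
	return patch_instructions(pad_area, &nop, pad_len, true);
}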
5 changes: 0 additions & 5 deletions arch/powerpc/include/asm/cpm1.h
@@ -49,11 +49,6 @@
  */
 extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
 
-#define cpm_dpalloc cpm_muram_alloc
-#define cpm_dpfree cpm_muram_free
-#define cpm_dpram_addr cpm_muram_addr
-#define cpm_dpram_phys cpm_muram_dma
-
 extern void cpm_setbrg(uint brg, uint rate);
 
 extern void __init cpm_load_patch(cpm8xx_t *cp);
4 changes: 0 additions & 4 deletions arch/powerpc/include/asm/cpm2.h
@@ -87,10 +87,6 @@
  */
 extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */
 
-#define cpm_dpalloc cpm_muram_alloc
-#define cpm_dpfree cpm_muram_free
-#define cpm_dpram_addr cpm_muram_addr
-
 extern void cpm2_reset(void);
 
 /* Baud rate generators.
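Both cpm1.h and cpm2.h drop the legacy cpm_dp* aliases, leaving callers on the cpm_muram_* API from the QE/CPM common code. A hedged sketch of the spelling change at a call site (the ring size and alignment are invented, and the s32 error convention assumes the cpm_muram_alloc() of recent kernels):

#include <soc/fsl/qe/qe.h>	/* cpm_muram_alloc() and friends */

/* Hypothetical call site: carve out space for an 8-entry ring of
 * 8-byte buffer descriptors in dual-port RAM. */
static void __iomem *alloc_bd_ring(s32 *offset)
{
	*offset = cpm_muram_alloc(8 * 8, 8);	/* was: cpm_dpalloc() */
	if (*offset < 0)
		return NULL;			/* allocation failed */

	return cpm_muram_addr(*offset);		/* was: cpm_dpram_addr() */
}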
16 changes: 4 additions & 12 deletions arch/powerpc/include/asm/fixmap.h
@@ -23,18 +23,6 @@
 #include <asm/kmap_size.h>
 #endif
 
-#ifdef CONFIG_PPC64
-#define FIXADDR_TOP	(IOREMAP_END + FIXADDR_SIZE)
-#else
-#define FIXADDR_SIZE	0
-#ifdef CONFIG_KASAN
-#include <asm/kasan.h>
-#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
-#else
-#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
-#endif
-#endif
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
@@ -119,5 +107,9 @@ static inline void __set_fixmap(enum fixed_addresses idx,
 
 #define __early_set_fixmap	__set_fixmap
 
+#ifdef CONFIG_PPC_8xx
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif
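With the CONFIG_PPC64 block gone, each pgtable.h now supplies its own FIXADDR_TOP (see the two earlier hunks in this commit) and this header only consumes it, while VIRT_IMMR_BASE moves here so 8xx code picks it up through the fixmap machinery. The arithmetic itself is unchanged: asm-generic/fixmap.h computes slot addresses downward from FIXADDR_TOP. A stand-alone model (the FIX_IMMR_BASE index is illustrative):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))	/* the 32-bit !KASAN value above */

/* Same formula as asm-generic/fixmap.h's __fix_to_virt() */
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

enum fixed_addresses { FIX_HOLE, FIX_IMMR_BASE };	/* index 1 is illustrative */

int main(void)
{
	printf("VIRT_IMMR_BASE would be %#lx\n", __fix_to_virt(FIX_IMMR_BASE));
	return 0;
}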