sh: Support explicit L1 cache disabling.
This reworks the cache mode configuration in Kconfig, and allows for explicit
selection of write-back/write-through/off configurations. All of the cache
flushing routines are optimized away for the off case.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit e7bd34a15b
parent ac919986d7
9 changed files with 86 additions and 31 deletions
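For orientation before the diff: the patch replaces the single CONFIG_SH_WRITETHROUGH switch with a three-way Kconfig choice (CACHE_WRITEBACK, CACHE_WRITETHROUGH, CACHE_OFF), and cache_init() then sets the CCR bits accordingly. Below is a minimal user-space sketch of that three-way selection; pick_ccr_flags() and the numeric CCR_* values are stand-ins for illustration, not the real SuperH control-register bits.

/*
 * Minimal user-space sketch of the mode selection the patched cache_init()
 * performs at boot.  The CCR_* values are stand-ins, not the real SuperH
 * CCR bit definitions; only the #if/#elif/#else structure and the
 * CONFIG_CACHE_* symbols mirror the patch.
 */
#include <stdio.h>

#define CCR_CACHE_ENABLE 0x1    /* stand-in value */
#define CCR_CACHE_WT     0x2    /* stand-in value */
#define CCR_CACHE_CB     0x4    /* stand-in value */

#define CONFIG_CACHE_WRITEBACK 1    /* exactly one of the three gets defined */

static unsigned long pick_ccr_flags(unsigned long flags)
{
#if defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= CCR_CACHE_WT;          /* Write-through */
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= CCR_CACHE_CB;          /* Write-back */
#else
        flags &= ~CCR_CACHE_ENABLE;     /* Off: leave the L1 cache disabled */
#endif
        return flags;
}

int main(void)
{
        printf("CCR flags: %#lx\n", pick_ccr_flags(CCR_CACHE_ENABLE));
        return 0;
}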
@@ -143,12 +143,15 @@ static void __init cache_init(void)
         flags &= ~CCR_CACHE_EMODE;
 #endif
 
-#ifdef CONFIG_SH_WRITETHROUGH
-        /* Turn on Write-through caching */
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+        /* Write-through */
         flags |= CCR_CACHE_WT;
-#else
-        /* .. or default to Write-back */
+#elif defined(CONFIG_CACHE_WRITEBACK)
+        /* Write-back */
         flags |= CCR_CACHE_CB;
+#else
+        /* Off */
+        flags &= ~CCR_CACHE_ENABLE;
 #endif
 
         ctrl_outl(flags, CCR);
@@ -128,7 +128,8 @@ DECLARE_EXPORT(__movstrSI12_i4);
 #endif /* __GNUC__ == 4 */
 #endif
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
+    defined(CONFIG_SH7705_CACHE_32KB))
 /* needed by some modules */
 EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_cache_range);
@@ -136,8 +137,8 @@ EXPORT_SYMBOL(flush_dcache_page);
 EXPORT_SYMBOL(__flush_purge_region);
 #endif
 
-#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
-    defined(CONFIG_SH7705_CACHE_32KB))
+#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
+    (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 EXPORT_SYMBOL(clear_user_page);
 #endif
@@ -2,7 +2,6 @@
 # Processor families
 #
 config CPU_SH2
-        select SH_WRITETHROUGH if !CPU_SH2A
         bool
 
 config CPU_SH2A
@@ -414,8 +413,17 @@ config SH_DIRECT_MAPPED
           Turn this option off for platforms that do not have a direct-mapped
           cache, and you have no need to run the caches in such a configuration.
 
-config SH_WRITETHROUGH
-        bool "Use write-through caching"
+choice
+        prompt "Cache mode"
+        default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
+        default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
+
+config CACHE_WRITEBACK
+        bool "Write-back"
+        depends on CPU_SH2A || CPU_SH3 || CPU_SH4
+
+config CACHE_WRITETHROUGH
+        bool "Write-through"
         help
           Selecting this option will configure the caches in write-through
           mode, as opposed to the default write-back configuration.
@@ -426,4 +434,9 @@ config SH_WRITETHROUGH
 
           If unsure, say N.
 
+config CACHE_OFF
+        bool "Off"
+
+endchoice
+
 endmenu
@@ -4,9 +4,10 @@
 
 obj-y                           := init.o extable.o consistent.o
 
-obj-$(CONFIG_CPU_SH2)           += cache-sh2.o
-obj-$(CONFIG_CPU_SH3)           += cache-sh3.o
-obj-$(CONFIG_CPU_SH4)           += cache-sh4.o
+cache-$(CONFIG_CPU_SH2)         := cache-sh2.o
+cache-$(CONFIG_CPU_SH3)         := cache-sh3.o
+cache-$(CONFIG_CPU_SH4)         := cache-sh4.o pg-sh4.o
+cache-$(CONFIG_CACHE_OFF)       :=
 
 mmu-y                           := tlb-nommu.o pg-nommu.o
 mmu-$(CONFIG_CPU_SH3)           += fault-nommu.o
@@ -14,7 +15,7 @@ mmu-$(CONFIG_CPU_SH4)           += fault-nommu.o
 mmu-$(CONFIG_MMU)               := fault.o clear_page.o copy_page.o tlb-flush.o \
                                    ioremap.o
 
-obj-y                           += $(mmu-y)
+obj-y                           += $(cache-y) $(mmu-y)
 
 ifdef CONFIG_DEBUG_FS
 obj-$(CONFIG_CPU_SH4)           += cache-debugfs.o
@@ -22,7 +23,7 @@ endif
 
 ifdef CONFIG_MMU
 obj-$(CONFIG_CPU_SH3)           += tlb-sh3.o
-obj-$(CONFIG_CPU_SH4)           += tlb-sh4.o pg-sh4.o
+obj-$(CONFIG_CPU_SH4)           += tlb-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
 endif
@@ -145,7 +145,7 @@ repeat:
 
         ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
 
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
         /*
          * When we are in 32-bit address extended mode, CCR.CB becomes
          * invalid, so care must be taken to manually adjust cacheable
@@ -34,22 +34,27 @@ void update_mmu_cache(struct vm_area_struct * vma,
         unsigned long flags;
         unsigned long pteval;
         unsigned long vpn;
-        struct page *page;
-        unsigned long pfn;
 
         /* Ptrace may call this routine. */
         if (vma && current->active_mm != vma->vm_mm)
                 return;
 
-        pfn = pte_pfn(pte);
-        if (pfn_valid(pfn)) {
-                page = pfn_to_page(pfn);
-                if (!test_bit(PG_mapped, &page->flags)) {
-                        unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                        __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-                        __set_bit(PG_mapped, &page->flags);
+#ifndef CONFIG_CACHE_OFF
+        {
+                unsigned long pfn = pte_pfn(pte);
+
+                if (pfn_valid(pfn)) {
+                        struct page *page = pfn_to_page(pfn);
+
+                        if (!test_bit(PG_mapped, &page->flags)) {
+                                unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+                                __flush_wback_region((void *)P1SEGADDR(phys),
+                                                     PAGE_SIZE);
+                                __set_bit(PG_mapped, &page->flags);
+                        }
                 }
         }
+#endif
 
         local_irq_save(flags);
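The update_mmu_cache() hunk above moves the flush work, and the locals it needs, into a brace-delimited block guarded by #ifndef CONFIG_CACHE_OFF, so with the cache off the compiler never sees the flush path or its variables. A rough stand-alone illustration of the pattern follows; update_entry() and the address mask are hypothetical stand-ins, not kernel code.

/*
 * Stand-alone illustration of the pattern used in the hunk above: the
 * flush logic and its block-local variables disappear entirely when
 * CONFIG_CACHE_OFF is defined.  update_entry() and the 0xfff mask are
 * hypothetical stand-ins, not kernel code.
 */
#include <stdio.h>

/* #define CONFIG_CACHE_OFF 1 */        /* define this to compile the block away */

static void update_entry(unsigned long pteval)
{
#ifndef CONFIG_CACHE_OFF
        {
                unsigned long phys = pteval & ~0xfffUL; /* stand-in for PTE_PHYS_MASK */

                printf("flush region for %#lx\n", phys);
        }
#endif
        printf("install PTE %#lx\n", pteval);
}

int main(void)
{
        update_entry(0x12345678);
        return 0;
}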
@@ -66,7 +71,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
 
         /* Set PTEL register */
         pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
         pteval |= _PAGE_WT;
 #endif
         /* conveniently, we want all the software flags to be 0 anyway */
@@ -1,16 +1,47 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
 #ifdef __KERNEL__
 
-#include <linux/mm.h>
+#ifdef CONFIG_CACHE_OFF
+/*
+ * Nothing to do when the cache is disabled, initial flush and explicit
+ * disabling is handled at CPU init time.
+ *
+ * See arch/sh/kernel/cpu/init.c:cache_init().
+ */
+#define p3_cache_init()                         do { } while (0)
+#define flush_cache_all()                       do { } while (0)
+#define flush_cache_mm(mm)                      do { } while (0)
+#define flush_cache_dup_mm(mm)                  do { } while (0)
+#define flush_cache_range(vma, start, end)      do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
+#define flush_dcache_page(page)                 do { } while (0)
+#define flush_icache_range(start, end)          do { } while (0)
+#define flush_icache_page(vma,pg)               do { } while (0)
+#define flush_dcache_mmap_lock(mapping)         do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)       do { } while (0)
+#define flush_cache_sigtramp(vaddr)             do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define __flush_wback_region(start, size)       do { (void)(start); } while (0)
+#define __flush_purge_region(start, size)       do { (void)(start); } while (0)
+#define __flush_invalidate_region(start, size)  do { (void)(start); } while (0)
+#else
 #include <asm/cpu/cacheflush.h>
 
+/*
+ * Consistent DMA requires that the __flush_xxx() primitives must be set
+ * for any of the enabled non-coherent caches (most of the UP CPUs),
+ * regardless of PIPT or VIPT cache configurations.
+ */
+
 /* Flush (write-back only) a region (smaller than a page) */
 extern void __flush_wback_region(void *start, int size);
 /* Flush (write-back & invalidate) a region (smaller than a page) */
 extern void __flush_purge_region(void *start, int size);
 /* Flush (invalidate only) a region (smaller than a page) */
 extern void __flush_invalidate_region(void *start, int size);
+#endif
 
 #define flush_cache_vmap(start, end)            flush_cache_all()
 #define flush_cache_vunmap(start, end)          flush_cache_all()
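A side note on the no-op definitions introduced above: they expand to do { } while (0) rather than to nothing so that a call followed by a semicolon still parses as a single statement (for example inside an unbraced if/else), and the __flush_*_region() stubs evaluate their start argument so callers do not trip set-but-unused warnings. A small self-contained demonstration follows; the two macros are copied locally as stand-ins for the header.

/*
 * Demonstrates why the CONFIG_CACHE_OFF stubs are "do { } while (0)":
 * the macro still behaves as one statement after expansion.  The two
 * macros below are local copies for the demo, not the kernel header.
 */
#include <stdio.h>

#define flush_dcache_page(page)                 do { } while (0)
#define __flush_wback_region(start, size)       do { (void)(start); } while (0)

int main(void)
{
        char page[64];
        int dirty = 1;

        if (dirty)
                flush_dcache_page(page);        /* expands to a valid single statement */
        else
                printf("nothing to flush\n");

        __flush_wback_region(page, sizeof(page));  /* start is evaluated, then discarded */
        printf("done\n");
        return 0;
}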
@@ -70,14 +70,14 @@ extern void clear_page_nommu(void *to);
 extern void copy_page_nommu(void *to, void *from);
 #endif
 
-#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
-    defined(CONFIG_SH7705_CACHE_32KB))
+#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
+    (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
 extern void __clear_user_page(void *to, void *orig_to);
 extern void __copy_user_page(void *to, void *from, void *orig_to);
-#elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
+#else
 #define clear_user_page(page, vaddr, pg)        clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
 #endif
@@ -563,7 +563,8 @@ struct mm_struct;
 extern unsigned int kobjsize(const void *objp);
 #endif /* !CONFIG_MMU */
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
+    defined(CONFIG_SH7705_CACHE_32KB))
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 #endif