
[Blackfin] arch: Initial checkin of the memory protection support.

Enable it with CONFIG_MPU.

Signed-off-by: Bernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
Author:    Bernd Schmidt
Date:      2008-01-27 18:39:16 +08:00
Committer: Bryan Wu
Parent:    2047e40d72
Commit:    b97b8a9983

15 changed files with 822 additions and 8 deletions
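The feature is opt-in: the new code paths are compiled only when the kernel configuration selects the option named in the commit message. A minimal, illustrative .config fragment (only CONFIG_MPU itself comes from this commit):

# illustrative kernel .config fragment
CONFIG_MPU=y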


@@ -0,0 +1,61 @@
/*
* File: include/asm-blackfin/cplb-mpu.h
* Based on:
* Author:
*
* Created:
* Description:
*
* Modified:
* Copyright 2004-2006 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __ASM_BFIN_CPLB_MPU_H
#define __ASM_BFIN_CPLB_MPU_H
struct cplb_entry {
unsigned long data, addr;
};
struct mem_region {
unsigned long start, end;
unsigned long dcplb_data;
unsigned long icplb_data;
};
extern struct cplb_entry dcplb_tbl[MAX_CPLBS];
extern struct cplb_entry icplb_tbl[MAX_CPLBS];
extern int first_switched_icplb;
extern int first_mask_dcplb;
extern int first_switched_dcplb;
extern int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot;
extern int nr_cplb_flush;
extern int page_mask_order;
extern int page_mask_nelts;
extern unsigned long *current_rwx_mask;
extern void flush_switched_cplbs(void);
extern void set_mask_dcplbs(unsigned long *);
extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
#endif /* __ASM_BFIN_CPLB_MPU_H */
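Judging by its include guard, the new header above is the MPU variant of the CPLB declarations: a shadow table of address/data pairs per CPLB type, the pointer to the current process's read/write/execute bitmaps, and the counters used by the miss handler. A hedged sketch of filling one slot of the data shadow table, assuming the CPLB flag and page-size macros (CPLB_VALID, CPLB_SUPV_WR, CPLB_USER_RD, PAGE_SIZE_4MB) that asm/cplb.h provides; the real table setup lives in C files added by this commit that are not shown in this excerpt:

/* Illustrative only: describe one 4 MB page at address 0 as valid,
 * kernel-writable and user-readable.  Slot 0 and the flag choice are
 * arbitrary; the flag macros are assumed from asm/cplb.h.  */
static void example_fill_dcplb_slot(void)
{
	dcplb_tbl[0].addr = 0;
	dcplb_tbl[0].data = CPLB_VALID | CPLB_SUPV_WR | CPLB_USER_RD | PAGE_SIZE_4MB;
}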


@@ -65,7 +65,11 @@
#define SIZE_1M 0x00100000 /* 1M */
#define SIZE_4M 0x00400000 /* 4M */
#ifdef CONFIG_MPU
#define MAX_CPLBS 16
#else
#define MAX_CPLBS (16 * 2)
#endif
#define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M)
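With CONFIG_MPU the table size matches the hardware exactly: Blackfin parts provide 16 data and 16 instruction CPLBs, and the MPU code keeps one shadow entry per hardware slot, so MAX_CPLBS drops from (16 * 2) to 16. A hedged sketch of pushing the shadow table into the hardware, assuming the conventional MMR names DCPLB_ADDR0/DCPLB_DATA0 and the bfin_write32() accessor from the Blackfin headers; the CPLB-miss handling added elsewhere in this commit, not shown in this excerpt, is what updates the hardware in practice:

/* Hedged sketch: load all 16 data-CPLB slots from the shadow table.
 * Must run with data CPLBs disabled or from an always-covered region;
 * MMR names and bfin_write32() are assumed from the usual headers.  */
static void example_load_dcplbs(void)
{
	int i;

	for (i = 0; i < MAX_CPLBS; i++) {
		bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
		bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
	}
}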


@@ -33,6 +33,12 @@
#include <asm/blackfin.h>
#include <asm/cplb.h>
#ifdef CONFIG_MPU
#include <asm/cplb-mpu.h>
#else
#define INITIAL_T 0x1
#define SWITCH_T 0x2
#define I_CPLB 0x4
@@ -79,6 +85,8 @@ extern u_long ipdt_swapcount_table[];
extern u_long dpdt_swapcount_table[];
#endif
#endif /* CONFIG_MPU */
extern unsigned long reserved_mem_dcache_on;
extern unsigned long reserved_mem_icache_on;


@@ -24,7 +24,9 @@ typedef struct {
unsigned long exec_fdpic_loadmap;
unsigned long interp_fdpic_loadmap;
#endif
#ifdef CONFIG_MPU
unsigned long *page_rwx_mask;
#endif
} mm_context_t;
#endif
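The single per-mm addition is a pointer to that process's protection bitmaps. As the init_new_context() and protect_page() changes below show, the allocation holds three consecutive bitmaps (read, write, execute) of page_mask_nelts words each, one bit per 4 KB page. A worked sizing example for a hypothetical 64 MB SDRAM system (page_mask_nelts and page_mask_order are really set up by the MPU initialisation code added elsewhere in this commit):

/* Illustrative arithmetic only; 64 MB is a made-up figure.
 *   64 MB / 4 KB                    = 16384 pages to track
 *   16384 pages / 32 bits per long  =   512 longs per bitmap (page_mask_nelts)
 *   512 longs * 3 bitmaps * 4 bytes =  6144 bytes
 * so __get_free_pages() needs two 4 KB pages, i.e. page_mask_order == 1.  */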


@@ -30,9 +30,12 @@
#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__
#include <linux/gfp.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
@@ -50,6 +53,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
mm->context.page_rwx_mask = (unsigned long *)p;
memset(mm->context.page_rwx_mask, 0,
page_mask_nelts * 3 * sizeof(long));
#endif
return 0;
}
@@ -73,6 +82,11 @@ static inline void destroy_context(struct mm_struct *mm)
sram_free(tmp->addr);
kfree(tmp);
}
#ifdef CONFIG_MPU
if (current_rwx_mask == mm->context.page_rwx_mask)
current_rwx_mask = NULL;
free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
static inline unsigned long
@@ -106,9 +120,21 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
#define deactivate_mm(tsk,mm) do { } while (0)
-static inline void activate_mm(struct mm_struct *prev_mm,
-			       struct mm_struct *next_mm)
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			     struct task_struct *tsk)
{
if (prev_mm == next_mm)
return;
#ifdef CONFIG_MPU
if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
flush_switched_cplbs();
set_mask_dcplbs(next_mm->context.page_rwx_mask);
}
#endif
/* L1 stack switching. */
if (!next_mm->context.l1_stack_save)
return;
if (next_mm->context.l1_stack_save == current_l1_stack_save)
@@ -120,10 +146,36 @@ static inline void activate_mm(struct mm_struct *prev_mm,
memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
#ifdef CONFIG_MPU
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
-	activate_mm(prev, next);
unsigned long *mask = mm->context.page_rwx_mask;
unsigned long page = addr >> 12;
unsigned long idx = page >> 5;
unsigned long bit = 1 << (page & 31);
if (flags & VM_MAYREAD)
mask[idx] |= bit;
else
mask[idx] &= ~bit;
mask += page_mask_nelts;
if (flags & VM_MAYWRITE)
mask[idx] |= bit;
else
mask[idx] &= ~bit;
mask += page_mask_nelts;
if (flags & VM_MAYEXEC)
mask[idx] |= bit;
else
mask[idx] &= ~bit;
}
static inline void update_protections(struct mm_struct *mm)
{
flush_switched_cplbs();
set_mask_dcplbs(mm->context.page_rwx_mask);
}
#endif
#endif
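To make the index arithmetic in protect_page() concrete, here is a small self-contained user-space sketch that reproduces the mapping for one hypothetical address:

#include <stdio.h>

/* Mirrors protect_page()'s address-to-bit computation; the address and
 * the printed layout are illustrative only.  */
int main(void)
{
	unsigned long addr = 0x00403000;	  /* hypothetical address   */
	unsigned long page = addr >> 12;	  /* 4 KB page number: 1027 */
	unsigned long idx  = page >> 5;		  /* long-word index: 32    */
	unsigned long bit  = 1UL << (page & 31);  /* bit 3 within that word */

	printf("page %lu -> word %lu, bit 0x%lx\n", page, idx, bit);
	return 0;
}

For this page, VM_MAYREAD lands in word 32 of the first bitmap, VM_MAYWRITE in word page_mask_nelts + 32, and VM_MAYEXEC in word 2 * page_mask_nelts + 32. Whenever the protections of an address space change, update_protections() flushes the switched CPLB entries and re-points the data-CPLB mask at the bitmaps, much as switch_mm() does above when the outgoing mm owned the current mask.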