summaryrefslogtreecommitdiff
path: root/os/sa1110/mmu.c
blob: 1dab239a410432463d080dee29e4c44fa4c73503 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

/*
 * return physical address corresponding to a given virtual address,
 * or 0 if there is no such address
 */
ulong
va2pa(void *v)
{
	int idx;
	ulong pte, ste, *ttb;

	idx = MmuL1x((ulong)v);
	ttb = (ulong*)KTTB;
	ste = ttb[idx];
	switch(ste & MmuL1type) {
	case MmuL1section:
		return MmuSBA(ste)|((ulong)v & 0x000fffff);
	case MmuL1page:
		pte = ((ulong *)MmuPTBA(ste))[MmuL2x((ulong)v)]; 
		switch(pte & 3) {
		case MmuL2large:
			return (pte & 0xffff0000)|((ulong)v & 0x0000ffff);
		case MmuL2small:
			return (pte & 0xfffff000)|((ulong)v & 0x00000fff);
		}
	}
	return 0;
}

enum {
	SectionPages = MmuSection/MmuSmallPage,	/* small-page PTEs needed to describe one section */
	PtAlign = 1<<10,		/* alignment (1KB) used when allocating an L2 page table */

	MINICACHED = 0x10000000,	/* address bit OR'ed into a kernel VA to select the
					 * cached-but-unbuffered double mapping set up by mmuinit
					 * (used by minicached() below) */
};

/* for debugging */
void
prs(char *s)
{
	/* write the NUL-terminated string directly to the uart */
	while(*s != '\0')
		uartputc(*s++);
}

void
pr16(ulong n)
{
	int shift;

	/* emit n as eight uppercase hex digits, most significant nibble first */
	shift = 32;
	while(shift > 0){
		shift -= 4;
		uartputc("0123456789ABCDEF"[(n>>shift)&0xF]);
	}
}

/*
 * map the 1MB section containing phys at its kernel virtual address
 * and return that address.  (second parameter — a length — is currently unused)
 */
void*
mmuphysmap(ulong phys, ulong)
{
	void *kva;
	ulong *ttb;

	kva = KADDR(phys);
	ttb = (ulong*)KTTB;
	/* section descriptor: 0xC10 carries the access-permission/required bits
	 * used throughout this file for uncached kernel sections */
	ttb[MmuL1x((ulong)kva)] = phys | 0xC10 | MmuL1section;
	return kva;
}

/*
 * Set a 1-1 map of virtual to physical memory, except:
 *	doubly-map page0 at the alternative interrupt vector address,
 * 	doubly-map physical memory at KZERO+256*MB as uncached but buffered, and
 *	disable access to 0 (nil pointers).
 *
 * Built directly in the level-1 table at KTTB using 1MB section entries
 * (0xC10 = access-permission/required descriptor bits used for every section
 * here), plus one coarse L2 table for the alternative vector page.
 */
void
mmuinit(void)
{
	int i;
	ulong *ttb, *ptable, va;

	ttb = (ulong*)KTTB;
	/* invalidate the first 256MB of virtual space, so nil-pointer
	 * (and other low-address) references fault */
	for(i=0; i<MmuL1x(0x10000000); i++)
		ttb[i] = 0;
	/* identity-map the remainder of the 4GB space as uncached sections */
	for(; i < 0x1000; i++)
		ttb[i] = (i<<20) | 0xC10 | MmuL1section;
	for(va = KZERO; va < KZERO+64*MB; va += MB)
		ttb[MmuL1x(va)] |= MmuWB | MmuIDC;	/* DRAM is cacheable */
	/* second view of DRAM at UCDRAMZERO: uncached (no MmuWB/MmuIDC added) */
	for(i = 0; i < 64*MB; i += MB)
		ttb[MmuL1x(UCDRAMZERO+i)] = (PHYSMEM0+i) | 0xC10 | MmuL1section;
	/* TO DO: make the text read only */
	/* third view at KZERO|MINICACHED; section base is va itself, which is
	 * the physical address given the identity mapping above */
	for(va = KZERO; va < KZERO+64*MB; va += MB)
		ttb[MmuL1x(va|MINICACHED)] = va | 0xC10  | MmuIDC | MmuL1section;	/* cached but unbuffered (thus minicache) for frame buffer */
	ttb[MmuL1x(DCFADDR)] |= MmuIDC | MmuWB;	/* cached and buffered for cache writeback */
	ttb[MmuL1x(MCFADDR)] |= MmuIDC;	/* cached and unbuffered for minicache writeback */
	/* remap flash */
	for(i=0; i<32*MB; i+=MB)
		ttb[MmuL1x(FLASHMEM+i)] = (PHYSFLASH0+i) | 0xC10 | MmuL1section;	/* we'll make flash uncached for now */

	/*
	 * build page table for alternative vector page, mapping trap vectors in *page0
	 */
	ptable = xspanalloc(SectionPages*sizeof(*ptable), PtAlign, 0);
	ptable[MmuL2x(AIVECADDR)] = PADDR(page0) | MmuL2AP(MmuAPsrw) | MmuWB | MmuIDC | MmuL2small;
	ttb[MmuL1x(AIVECADDR)] = PADDR(ptable) | MmuL1page;
	/* install the table and switch the MMU on; domain access = client (checked) */
	mmuputttb(KTTB);
	mmuputdac(1);	/* client */
	mmuenable(CpCaltivec | CpCIcache | CpCsystem | (1<<6) | CpCd32 | CpCi32 | CpCwb | CpCDcache | CpCmmu);
}

/*
 * flush data in a given address range to memory
 * and invalidate the region in the instruction cache.
 */
int
segflush(void *a, ulong n)
{
	/* write back the data cache for [a, a+n) before invalidating I-cache,
	 * so newly written instructions are visible to instruction fetch */
	dcflush(a, n);
	icflushall();	/* can't be more precise */
	return 0;	/* always succeeds */
}

/*
 * map an address to cached but unbuffered memory
 * forcing load allocations to the mini data cache.
 * the address a must be in a region that is cache line aligned
 * with a length that is a multiple of the cache line size
 */
void *
minicached(void *a)
{
	/* minicache mapping disabled by configuration: use the address unchanged */
	if(conf.useminicache == 0)
		return a;
	/* must flush and invalidate any data lingering in main cache */
	dcflushall();
	minidcflush();
	dcinval();
	/* alias of a in the cached-but-unbuffered region set up by mmuinit */
	return (void*)((ulong)a | MINICACHED);
}