/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/module.h>

/*
 * Pages are always page-aligned, which satisfies the 16-byte alignment
 * that move16 requires, so we don't need to check for alignment etc.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
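/*
 * Each move16 transfers one 16-byte line, so the two per iteration move
 * 32 bytes and the dbra counter is preloaded with PAGE_SIZE / 32 - 1.
 */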
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra  %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1)
			     );
}

static inline void clear_page(void *page)
{
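	/*
	 * Clear the first 16-byte line by hand, then let move16 replicate
	 * that zeroed line through the remaining PAGE_SIZE - 16 bytes; the
	 * two subqw #8 rewind the source pointer after every transfer so it
	 * keeps reading the same zero line.
	 */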
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw  #8,%2\n\t"
			     "subqw  #8,%2\n\t"
			     "dbra   %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
#endif

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL

static inline unsigned long ___pa(void *vaddr)
{
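	/*
	 * The "#0" immediate below is only a placeholder: the entry emitted
	 * by m68k_fixup() lets early boot code patch it with m68k_memoffset,
	 * so the translation ends up as a single add with no memory load.
	 */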
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(long)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
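/*
 * As implemented below: addresses at or above PAGE_OFFSET translate
 * linearly (minus PAGE_OFFSET), everything else is offset by 0x2000000,
 * and 0 maps to 0 in both directions.
 */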
#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return (x - PAGE_OFFSET);
	else
		return (x + 0x2000000);
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: "virtual" isn't really correct; it should actually be the offset
 * into the memory node, but since we have no highmem this works for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions; this would
 * make many of the shifts unnecessary.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

extern int m68k_virt_to_node_shift;

#ifndef CONFIG_DISCONTIGMEM
#define __virt_to_node(addr)	(&pg_data_map[0])
#else
extern struct pglist_data *pg_data_table[];

static inline __attribute_const__ int __virt_to_node_shift(void)
{
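	/*
	 * As in ___pa()/__va() above, the moveq #0 is a placeholder patched
	 * at boot via the m68k_fixup_vnode_shift entry, which avoids a
	 * runtime load of m68k_virt_to_node_shift.
	 */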
	int shift;

	asm (
		"1:	moveq	#0,%0\n"
		m68k_fixup(%c1, 1b)
		: "=d" (shift)
		: "i" (m68k_fixup_vnode_shift));
	return shift;
}

#define __virt_to_node(addr)	(pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
#endif

#define virt_to_page(addr) ({						\
	pfn_to_page(virt_to_pfn(addr));					\
})
#define page_to_virt(page) ({						\
	pfn_to_virt(page_to_pfn(page));					\
})

#ifdef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) ({						\
	unsigned long __pfn = (pfn);					\
	struct pglist_data *pgdat;					\
	pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn));	\
	pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn);		\
})
#define page_to_pfn(_page) ({						\
	const struct page *__p = (_page);				\
	struct pglist_data *pgdat;					\
	pgdat = &pg_data_map[page_to_nid(__p)];				\
	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
})
#else
#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
#endif

#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#endif /* _M68K_PAGE_MM_H */