arch/mips/lib/uncached.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Thiemo Seufer
 * Copyright (C) 2005  MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */

#include <linux/init.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

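/*
 * Fallbacks for configurations whose <asm/addrspace.h> does not define
 * these: CKSSEG starts at the address CKSEG2 would otherwise name, and
 * a TO_PHYS_MASK of all ones leaves physical addresses unmasked.
 */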
#ifndef CKSEG2
#define CKSEG2 CKSSEG
#endif
#ifndef TO_PHYS_MASK
#define TO_PHYS_MASK -1
#endif

/*
 * FUNC is executed in one of the uncached segments, depending on its
 * original address as follows:
 *
 * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
 *    segment used is CKSEG1.
 * 2. If the original address is in XKPHYS, then the uncached segment
 *    used is XKPHYS(2).
 * 3. Otherwise it's a bug.
 *
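 * For example, on a 32-bit kernel a function at the CKSEG0 address
 * 0x80123400 is re-entered through its CKSEG1 alias 0xa0123400, so the
 * very same physical code is fetched, only with caching disabled.
 *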
 * The same remapping is done with the stack pointer.  Stack handling
 * works because we don't handle stack arguments or more complex return
 * values, so we can avoid sharing the same stack area between cached
 * and uncached mode.
 */
unsigned long __init run_uncached(void *func)
{
	register long sp __asm__("$sp");
	register long ret __asm__("$2");
	long lfunc = (long)func, ufunc;
	long usp;

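	/* First remap the stack pointer to its uncached alias. */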
	if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
		usp = CKSEG1ADDR(sp);
#ifdef CONFIG_64BIT
	else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
		 (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
		usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
				     XKPHYS_TO_PHYS((long long)sp));
#endif
	else {
		BUG();
		usp = sp;
	}
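
	/* Then remap the function address in the same way. */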
	if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
		ufunc = CKSEG1ADDR(lfunc);
#ifdef CONFIG_64BIT
	else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
		 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
		ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
				       XKPHYS_TO_PHYS((long long)lfunc));
#endif
	else {
		BUG();
		ufunc = lfunc;
	}

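	/*
	 * Save the original stack pointer in $16 (s0, callee-saved),
	 * switch to the uncached stack alias, call func via jalr (its
	 * return value lands in $2/v0, bound to ret above), then restore
	 * the original stack pointer.
	 */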
	__asm__ __volatile__ (
		"	move	$16, $sp\n"
		"	move	$sp, %1\n"
		"	jalr	%2\n"
		"	move	$sp, $16"
		: "=r" (ret)
		: "r" (usp), "r" (ufunc)
		: "$16", "$31");

	return ret;
}
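
/*
 * Usage sketch (illustration only): a helper run through run_uncached()
 * must take no stack arguments and return at most a register-sized
 * value, since only $2 is captured on return.  The helper and caller
 * names below are hypothetical; real callers live in the platform
 * cache setup code.
 */
#if 0
static int __init probe_cache_uncached(void)
{
	/* Executes and uses its stack through uncached aliases. */
	return 1;			/* comes back in $2 (v0) */
}

static void __init probe_cache_example(void)
{
	unsigned long present;

	/* Run the helper with its code and stack accessed uncached. */
	present = run_uncached(probe_cache_uncached);
	(void)present;
}
#endif	/* usage sketch */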