// SPDX-License-Identifier: GPL-2.0

#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/mmu.h>

/*
 * Insert two SLB entries that both translate the effective address @p,
 * deliberately creating an SLB multihit condition.
 *
 * @p:         kernel effective address to map
 * @ssize:     segment size (e.g. MMU_SEGSIZE_1T)
 * @page_size: index into mmu_psize_defs[] giving the segment's base page size
 *
 * Preemption is disabled so both slbmte instructions target the same
 * CPU's SLB and no context switch can intervene between them.
 */
static void insert_slb_entry(unsigned long p, int ssize, int page_size)
{
	unsigned long flags;

	flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
	preempt_disable();

	/* First entry, placed in the first non-bolted slot (SLB_NUM_BOLTED). */
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(p, ssize, flags)),
		       "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
		     : "memory");

	/*
	 * Second entry for the same EA in the next slot: two SLB entries
	 * now match @p, i.e. a multihit on the next access.
	 */
	asm volatile("slbmte %0,%1" :
			: "r" (mk_vsid_data(p, ssize, flags)),
			  "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
			: "memory");
	preempt_enable();
}

/* Inject an SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
static int inject_vmalloc_slb_multihit(void)
{
	char *addr = vmalloc(PAGE_SIZE);

	if (!addr)
		return -ENOMEM;

	insert_slb_entry((unsigned long)addr, MMU_SEGSIZE_1T, mmu_vmalloc_psize);

	/*
	 * The write below triggers the multihit exception; if it is
	 * handled correctly we must recover from the error.
	 */
	*addr = '!';

	vfree(addr);
	return 0;
}

/* Inject an SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
static int inject_kmalloc_slb_multihit(void)
{
	char *buf = kmalloc(2048, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	insert_slb_entry((unsigned long)buf, MMU_SEGSIZE_1T, mmu_linear_psize);

	/*
	 * The write below triggers the multihit exception; if it is
	 * handled correctly we must recover from the error.
	 */
	*buf = '!';

	kfree(buf);
	return 0;
}

/*
 * A few initial SLB entries are bolted (never cast out by the SLB
 * miss handler). Inject a multihit on bolted entry 0 by duplicating
 * it into two non-bolted slots, then touching the address it maps
 * (PAGE_OFFSET, the start of the kernel linear mapping).
 */
static void insert_dup_slb_entry_0(void)
{
	unsigned long test_address = PAGE_OFFSET, *test_ptr;
	unsigned long esid, vsid;
	unsigned long i = 0;

	test_ptr = (unsigned long *)test_address;
	preempt_disable();

	/* Read back the ESID/VSID pair of bolted SLB entry 0. */
	asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
	asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));

	/* for i !=0 we would need to mask out the old entry number */
	asm volatile("slbmte %0,%1" :
			: "r" (vsid),
			  "r" (esid | SLB_NUM_BOLTED)
			: "memory");

	/* Re-read entry 0 for the second duplicate. */
	asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
	asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));

	/* for i !=0 we would need to mask out the old entry number */
	asm volatile("slbmte %0,%1" :
			: "r" (vsid),
			  "r" (esid | (SLB_NUM_BOLTED + 1))
			: "memory");

	/* The dereference of test_ptr triggers the multihit on entry 0. */
	pr_info("%s accessing test address 0x%lx: 0x%lx\n",
		__func__, test_address, *test_ptr);

	preempt_enable();
}

/* LKDTM entry point: run all SLB multihit injections (hash MMU only). */
static void lkdtm_PPC_SLB_MULTIHIT(void)
{
	/* SLBs only exist under the hash MMU; radix has no equivalent. */
	if (radix_enabled()) {
		pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
		return;
	}

	pr_info("Injecting SLB multihit errors\n");
	/*
	 * These need not be separate tests; they do pretty much the same
	 * thing. In any case the machine must recover from the errors
	 * these helpers introduce, or it will not survive the test.
	 */
	inject_vmalloc_slb_multihit();
	inject_kmalloc_slb_multihit();
	insert_dup_slb_entry_0();
	pr_info("Recovered from SLB multihit errors\n");
}

/* Crash types exported by this file to the LKDTM core. */
static struct crashtype crashtypes[] = {
	CRASHTYPE(PPC_SLB_MULTIHIT),
};

/* Category registered with the LKDTM framework (see lkdtm.h / core.c). */
struct crashtype_category powerpc_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};