x86_64/vsyscall: make vgetcpu GDT setup work under Xen (linux-2.6.20)

Under Xen the kernel's GDT pages are read-only to the guest, so writing the
per-CPU vgetcpu descriptor directly via *d faults. Build the descriptor value
in a local variable and, when CONFIG_XEN is set, install it with the
HYPERVISOR_update_descriptor hypercall instead of a direct store. This lets
the previously Xen-excluded vsyscall_set_cpu / cpu_vsyscall_init /
hotcpu_notifier paths run unconditionally, so the #ifndef CONFIG_XEN guards
around them are removed.
--- linux-2.6.20.noarch/arch/x86_64/kernel/vsyscall.c~orig	2007-04-26 02:05:31.000000000 -0700
+++ linux-2.6.20.noarch/arch/x86_64/kernel/vsyscall.c	2007-04-26 15:11:02.000000000 -0700
@@ -40,6 +40,9 @@
 #include <asm/segment.h>
 #include <asm/desc.h>
 #include <asm/topology.h>
+#ifdef CONFIG_XEN
+#include <asm/hypercall.h>
+#endif
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
 #define __syscall_clobber "r11","rcx","memory"
@@ -246,12 +249,11 @@
 
 #endif
 
-#ifndef CONFIG_XEN
 /* Assume __initcall executes before all user space. Hopefully kmod
    doesn't violate that. We'll find out if it does. */
 static void __cpuinit vsyscall_set_cpu(int cpu)
 {
-	unsigned long *d;
+	unsigned long *d, n;
 	unsigned long node = 0;
 #ifdef CONFIG_NUMA
 	node = cpu_to_node[cpu];
@@ -263,10 +265,15 @@
 	   in user space in vgetcpu.
 	   12 bits for the CPU and 8 bits for the node. */
 	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
-	*d = 0x0f40000000000ULL;
-	*d |= cpu;
-	*d |= (node & 0xf) << 12;
-	*d |= (node >> 4) << 48;
+	n = 0x0f40000000000ULL;
+	n |= cpu;
+	n |= (node & 0xf) << 12;
+	n |= (node >> 4) << 48;
+#ifndef CONFIG_XEN
+	*d = n;
+#else
+	HYPERVISOR_update_descriptor(virt_to_machine(d), n);
+#endif
 }
 
 static void __cpuinit cpu_vsyscall_init(void *arg)
@@ -283,7 +290,6 @@
 		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
 	return NOTIFY_DONE;
 }
-#endif
 
 static void __init map_vsyscall(void)
 {
@@ -320,10 +326,8 @@
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2, 0);
 #endif
-#ifndef CONFIG_XEN
 	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
 	hotcpu_notifier(cpu_vsyscall_notifier, 0);
-#endif
 	return 0;
 }