--- linux/arch/i386/kernel/process.c.seg	2005-03-27 13:07:14.000000000 -0800
+++ linux/arch/i386/kernel/process.c	2005-03-28 10:28:47.000000000 -0800
@@ -597,8 +597,8 @@ struct task_struct fastcall * __switch_t
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
--- linux/arch/i386/kernel/vm86.c.seg	2005-03-27 13:07:14.000000000 -0800
+++ linux/arch/i386/kernel/vm86.c	2005-03-28 10:28:47.000000000 -0800
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm
  */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
--- linux/arch/x86_64/kernel/process.c.seg	2005-03-27 13:07:49.000000000 -0800
+++ linux/arch/x86_64/kernel/process.c	2005-03-28 11:11:04.206766410 -0800
@@ -391,10 +391,10 @@ int copy_thread(int nr, unsigned long cl
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -457,11 +457,11 @@ struct task_struct *__switch_to(struct t
 	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
@@ -472,7 +472,7 @@ struct task_struct *__switch_to(struct t
 	 */
 	{
 		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=g" (fsindex));
+		asm volatile("movl %%fs,%0" : "=r" (fsindex));
 		/* segment register != 0 always requires a reload.
 		   also reload when it has changed.
 		   when prev process used 64bit base always reload
@@ -493,7 +493,7 @@ struct task_struct *__switch_to(struct t
 	}
 	{
 		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=g" (gsindex));
+		asm volatile("movl %%gs,%0" : "=r" (gsindex));
 		if (unlikely(gsindex | next->gsindex | prev->gs)) {
 			load_gs_index(next->gsindex);
 			if (gsindex)
--- linux/include/asm-i386/system.h.seg	2005-03-27 13:09:12.000000000 -0800
+++ linux/include/asm-i386/system.h	2005-03-28 10:28:47.000000000 -0800
@@ -81,7 +81,7 @@ static inline unsigned long _get_base(ch
 #define loadsegment(seg,value)			\
 	asm volatile("\n"			\
 		"1:\t"				\
-		"movl %0,%%" #seg "\n"		\
+		"mov %0,%%" #seg "\n"		\
 		"2:\n"				\
 		".section .fixup,\"ax\"\n"	\
 		"3:\t"				\
@@ -93,13 +93,13 @@ static inline unsigned long _get_base(ch
 		".align 4\n\t"			\
 		".long 1b,3b\n"			\
 		".previous"			\
-		: :"m" (*(unsigned int *)&(value)))
+		: :"m" (value))
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
-	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+	asm volatile("mov %%" #seg ",%0":"=m" (value))
 
 /*
  * Clear and set 'TS' bit respectively
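
For reference, a minimal userspace sketch of the two idioms the patch
settles on (illustration only, not kernel code; the variable names are
made up, and it assumes gcc/GNU as on x86):

#include <stdio.h>

/* Memory destination: suffix-less mov and no cast, so the assembler
 * sizes the store from the 16-bit operand and nothing beyond the
 * field is written.  Same shape as the fixed savesegment(). */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0" : "=m" (value))

int main(void)
{
	unsigned short sel;	/* 16-bit, like thread.fs/thread.gs */
	unsigned int fsindex;	/* 32-bit, like thread.fsindex */

	savesegment(fs, sel);

	/* Register destination: movl stays, but "=r" (rather than "=g")
	 * guarantees a register operand, so the assembler never sees a
	 * memory destination for the 32-bit form. */
	asm volatile("movl %%fs,%0" : "=r" (fsindex));

	printf("fs selector: %#hx (mem), %#x (reg)\n", sel, fsindex);
	return 0;
}

Both changes address the same problem: newer assemblers reject movl
between a segment register and a memory operand, and the old
*(int *)& casts existed only to make the 32-bit form fit.  With the
casts gone the operand size comes from the real 16-bit field, and with
"=r" the remaining movl forms can only ever name a register.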