/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>

#ifdef CONFIG_MCA
#include <linux/mca.h>
#include <asm/processor.h>
#endif

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>

#ifdef CONFIG_X86_VISWS_APIC
#include <asm/cobalt.h>
#include <asm/lithium.h>
#endif

#include <linux/irq.h>
#include <linux/module.h>

asmlinkage int system_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

int kstack_depth_to_print = 24;

/*
 * If the address is either in the .text section of the
 * kernel, or in the vmalloc'ed module regions, it *may*
 * be the address of a calling routine
 */

#ifdef CONFIG_MODULES

extern struct module *module_list;
extern struct module kernel_module;

static inline int kernel_text_address(unsigned long addr)
{
	int retval = 0;
	struct module *mod;

	if (addr >= (unsigned long) &_stext &&
	    addr <= (unsigned long) &_etext)
		return 1;

	for (mod = module_list; mod != &kernel_module; mod = mod->next) {
		/* mod_bound tests for addr being inside the vmalloc'ed
		 * module area. Of course it'd be better to test only
		 * for the .text subset... */
		if (mod_bound(addr, 0, mod)) {
			retval = 1;
			break;
		}
	}

	return retval;
}

#else

static inline int kernel_text_address(unsigned long addr)
{
	return (addr >= (unsigned long) &_stext &&
		addr <= (unsigned long) &_etext);
}

#endif
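
/*
 * Illustrative sketch (added commentary, not in the original source):
 * how the back-trace code below uses kernel_text_address().  Every
 * word pulled off the kernel stack is tested, and anything that lands
 * in .text (or a module) is printed as a probable return address:
 *
 *	unsigned long val = *stack++;		// hypothetical stack slot
 *	if (kernel_text_address(val))
 *		printk(" [<%08lx>]", val);	// may be a caller's EIP
 *
 * A false hit (a data word that merely falls inside .text) only adds
 * noise to the trace; the value is printed, never dereferenced.
 */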

void show_trace(unsigned long * stack)
{
	int i;
	unsigned long addr;

	if (!stack)
		stack = (unsigned long*)&stack;

	printk("Call Trace: ");
	i = 1;
	while (((long) stack & (THREAD_SIZE-1)) != 0) {
		addr = *stack++;
		if (kernel_text_address(addr)) {
			if (i && ((i % 6) == 0))
				printk("\n ");
			printk(" [<%08lx>]", addr);
			i++;
		}
	}
	printk("\n");
}

void show_trace_task(struct task_struct *tsk)
{
	unsigned long esp = tsk->thread.esp;

	/* User space on another CPU? */
	if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
		return;
	show_trace((unsigned long *)esp);
}

void show_stack(unsigned long * esp)
{
	unsigned long *stack;
	int i;

	// debugging aid: "show_stack(NULL);" prints the
	// back trace for this cpu.

	if(esp==NULL)
		esp=(unsigned long*)&esp;

	stack = esp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 8) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(esp);
}

/*
 * The architecture-independent backtrace generator
 */
void dump_stack(void)
{
	show_stack(0);
}

void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	esp = (unsigned long) (&regs->esp);
	ss = __KERNEL_DS;
	if (regs->xcs & 3) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	printk("CPU: %d\nEIP: %04x:[<%08lx>] %s\nEFLAGS: %08lx\n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags);
	printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk("ds: %04x es: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk("Process %s (pid: %d, stackpage=%08lx)",
		current->comm, current->pid, 4096+(unsigned long)current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {

		printk("\nStack: ");
		show_stack((unsigned long*)esp);

		printk("\nCode: ");
		if(regs->eip < PAGE_OFFSET)
			goto bad;

		for(i=0;i<20;i++)
		{
			unsigned char c;
			if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
bad:
				printk(" Bad EIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
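
/*
 * Aside (added commentary, not in the original source): the
 * "esp = (unsigned long)(&regs->esp)" line in show_registers() above
 * relies on an i386 detail.  When a trap does not change privilege
 * level, the CPU pushes no ESP/SS, so the esp/xss members of struct
 * pt_regs are not actually on the stack; the address of the esp slot
 * is exactly where the kernel stack pointer sat when the trap hit,
 * which is the value we want to dump.  Only for a trap from user
 * mode (regs->xcs & 3) are regs->esp and regs->xss real.
 */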

static void handle_BUG(struct pt_regs *regs)
{
	unsigned short ud2;
	unsigned short line;
	char *file;
	char c;
	unsigned long eip;

	if (regs->xcs & 3)
		goto no_bug;		/* Not in kernel */

	eip = regs->eip;

	if (eip < PAGE_OFFSET)
		goto no_bug;
	if (__get_user(ud2, (unsigned short *)eip))
		goto no_bug;
	if (ud2 != 0x0b0f)
		goto no_bug;
	if (__get_user(line, (unsigned short *)(eip + 2)))
		goto bug;
	if (__get_user(file, (char **)(eip + 4)) ||
	    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
		file = "<bad filename>";

	printk("kernel BUG at %s:%d!\n", file, line);

no_bug:
	return;

	/* Here we know it was a BUG but file-n-line is unavailable */
bug:
	printk("Kernel BUG\n");
}

spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	handle_BUG(regs);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
		die(str, regs, err);
}

static inline unsigned long get_cr2(void)
{
	unsigned long address;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));
	return address;
}

static void inline do_trap(int trapnr, int signr, char *str, int vm86,
			   struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (regs->eflags & VM_MASK) {
		if (vm86)
			goto vm86_trap;
		else
			goto trap_signal;
	}

	if (!(regs->xcs & 3))
		goto kernel_trap;

trap_signal: {
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

kernel_trap: {
		unsigned long fixup = search_exception_table(regs->eip);
		if (fixup)
			regs->eip = fixup;
		else
			die(str, regs, error_code);
		return;
	}

vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void *)siaddr; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
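
/*
 * Expansion example (added commentary, not in the original source):
 * each DO_* line below stamps out one complete trap handler.  For
 * instance, DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 * expands to:
 *
 *	asmlinkage void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *	}
 *
 * The _INFO variants additionally fill in a siginfo_t before calling
 * do_trap(), and the VM86 variants pass vm86=1 so that faults taken
 * in v86 mode are routed to handle_vm86_trap() instead.
 */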

DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, get_cr2())

asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!(regs->xcs & 3))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	{
		unsigned long fixup;
		fixup = search_exception_table(regs->eip);
		if (fixup) {
			regs->eip = fixup;
			return;
		}
		die("general protection fault", regs, error_code);
	}
}

static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if( MCA_bus ) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}

asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
	unsigned char reason = inb(0x61);

	++nmi_count(smp_processor_id());

	if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	outb(0x8f, 0x70);
	inb(0x71);		/* dummy */
	outb(0x0f, 0x70);
	inb(0x71);		/* dummy */
}
481 */ 482 if (nmi_watchdog) { 483 nmi_watchdog_tick(regs); 484 return; 485 } 486 #endif 487 unknown_nmi_error(reason, regs); 488 return; 489 } 490 if (reason & 0x80) 491 mem_parity_error(reason, regs); 492 if (reason & 0x40) 493 io_check_error(reason, regs); 494 /* 495 * Reassert NMI in case it became active meanwhile 496 * as it's edge-triggered. 497 */ 498 outb(0x8f, 0x70); 499 inb(0x71); /* dummy */ 500 outb(0x0f, 0x70); 501 inb(0x71); /* dummy */ 502 } 503 504 /* 505 * Our handling of the processor debug registers is non-trivial. 506 * We do not clear them on entry and exit from the kernel. Therefore 507 * it is possible to get a watchpoint trap here from inside the kernel. 508 * However, the code in ./ptrace.c has ensured that the user can 509 * only set watchpoints on userspace addresses. Therefore the in-kernel 510 * watchpoint trap can only occur in code which is reading/writing 511 * from user space. Such code must not hold kernel locks (since it 512 * can equally take a page fault), therefore it is safe to call 513 * force_sig_info even though that claims and releases locks. 514 * 515 * Code in ./signal.c ensures that the debug control register 516 * is restored before we deliver any signal, and therefore that 517 * user code runs with the correct debug control register even though 518 * we clear it here. 519 * 520 * Being careful here means that we don't have to be as careful in a 521 * lot of more complicated places (task switching can be a bit lazy 522 * about restoring all the debug state, and ptrace doesn't have to 523 * find every occurrence of the TF bit that could be saved away even 524 * by user code) 525 */ 526 asmlinkage void do_debug(struct pt_regs * regs, long error_code) 527 { 528 unsigned int condition; 529 struct task_struct *tsk = current; 530 unsigned long eip = regs->eip; 531 siginfo_t info; 532 533 __asm__ __volatile__("movl %%db6,%0" : "=r" (condition)); 534 535 /* If the user set TF, it's simplest to clear it right away. */ 536 if ((eip >=PAGE_OFFSET) && (regs->eflags & TF_MASK)) 537 goto clear_TF; 538 539 /* Mask out spurious debug traps due to lazy DR7 setting */ 540 if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { 541 if (!tsk->thread.debugreg[7]) 542 goto clear_dr7; 543 } 544 545 if (regs->eflags & VM_MASK) 546 goto debug_vm86; 547 548 /* Save debug status register where ptrace can see it */ 549 tsk->thread.debugreg[6] = condition; 550 551 /* Mask out spurious TF errors due to lazy TF clearing */ 552 if (condition & DR_STEP) { 553 /* 554 * The TF error should be masked out only if the current 555 * process is not traced and if the TRAP flag has been set 556 * previously by a tracing process (condition detected by 557 * the PT_DTRACE flag); remember that the i386 TRAP flag 558 * can be modified by the process itself in user mode, 559 * allowing programs to debug themselves without the ptrace() 560 * interface. 561 */ 562 if ((regs->xcs & 3) == 0) 563 goto clear_TF; 564 if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE) 565 goto clear_TF; 566 } 567 568 /* Ok, finally something we can handle */ 569 tsk->thread.trap_no = 1; 570 tsk->thread.error_code = error_code; 571 info.si_signo = SIGTRAP; 572 info.si_errno = 0; 573 info.si_code = TRAP_BRKPT; 574 575 /* If this is a kernel mode trap, save the user PC on entry to 576 * the kernel, that's what the debugger can make sense of. 577 */ 578 info.si_addr = ((regs->xcs & 3) == 0) ? 

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
		case 0x041: /* Stack Fault */
		case 0x241: /* Stack Fault | Direction */
			info.si_code = FPE_FLTINV;
			/* Should we clear the SF or let user space do it ???? */
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_irq13 = 1;
	math_error((void *)regs->eip);
}

void simd_math_error(void *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
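
/*
 * Worked example (added commentary, not in the original source) for
 * the MXCSR decode above: the mask bits occupy bits 7-12 (0x1f80,
 * IM/DM/ZM/OM/UM/PM) and the sticky flag bits occupy bits 0-5 (0x3f,
 * IE/DE/ZE/OE/UE/PE), in the same order.  Shifting the mask field
 * right by 7 lines it up with the flags, so ~mask & flags leaves only
 * exceptions that are both raised and unmasked.  E.g. a SIMD divide
 * by zero with ZM (bit 9) cleared sets ZE (bit 2): the switch value
 * is 0x004 and the signal carries si_code = FPE_FLTDIV.
 */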
689 */ 690 mxcsr = get_fpu_mxcsr(task); 691 switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) { 692 case 0x000: 693 default: 694 break; 695 case 0x001: /* Invalid Op */ 696 info.si_code = FPE_FLTINV; 697 break; 698 case 0x002: /* Denormalize */ 699 case 0x010: /* Underflow */ 700 info.si_code = FPE_FLTUND; 701 break; 702 case 0x004: /* Zero Divide */ 703 info.si_code = FPE_FLTDIV; 704 break; 705 case 0x008: /* Overflow */ 706 info.si_code = FPE_FLTOVF; 707 break; 708 case 0x020: /* Precision */ 709 info.si_code = FPE_FLTRES; 710 break; 711 } 712 force_sig_info(SIGFPE, &info, task); 713 } 714 715 asmlinkage void do_simd_coprocessor_error(struct pt_regs * regs, 716 long error_code) 717 { 718 if (cpu_has_xmm) { 719 /* Handle SIMD FPU exceptions on PIII+ processors. */ 720 ignore_irq13 = 1; 721 simd_math_error((void *)regs->eip); 722 } else { 723 /* 724 * Handle strange cache flush from user space exception 725 * in all other cases. This is undocumented behaviour. 726 */ 727 if (regs->eflags & VM_MASK) { 728 handle_vm86_fault((struct kernel_vm86_regs *)regs, 729 error_code); 730 return; 731 } 732 die_if_kernel("cache flush denied", regs, error_code); 733 current->thread.trap_no = 19; 734 current->thread.error_code = error_code; 735 force_sig(SIGSEGV, current); 736 } 737 } 738 739 asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs, 740 long error_code) 741 { 742 #if 0 743 /* No need to warn about this any longer. */ 744 printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 745 #endif 746 } 747 748 /* 749 * 'math_state_restore()' saves the current math information in the 750 * old math state array, and gets the new ones from the current task 751 * 752 * Careful.. There are problems with IBM-designed IRQ13 behaviour. 753 * Don't touch unless you *really* know how it works. 754 */ 755 asmlinkage void math_state_restore(struct pt_regs regs) 756 { 757 __asm__ __volatile__("clts"); /* Allow maths ops (or we recurse) */ 758 759 if (current->used_math) { 760 restore_fpu(current); 761 } else { 762 init_fpu(); 763 } 764 current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */ 765 } 766 767 #ifndef CONFIG_MATH_EMULATION 768 769 asmlinkage void math_emulate(long arg) 770 { 771 printk("math-emulation not enabled and no coprocessor found.\n"); 772 printk("killing %s.\n",current->comm); 773 force_sig(SIGFPE,current); 774 schedule(); 775 } 776 777 #endif /* CONFIG_MATH_EMULATION */ 778 779 #ifndef CONFIG_X86_F00F_WORKS_OK 780 void __init trap_init_f00f_bug(void) 781 { 782 /* 783 * "idt" is magic - it overlaps the idt_descr 784 * variable so that updating idt will automatically 785 * update the idt descriptor.. 786 */ 787 __set_fixmap(FIX_F00F, __pa(&idt_table), PAGE_KERNEL_RO); 788 idt = (struct desc_struct *)__fix_to_virt(FIX_F00F); 789 790 __asm__ __volatile__("lidt %0": "=m" (idt_descr)); 791 } 792 #endif 793 794 #define _set_gate(gate_addr,type,dpl,addr) \ 795 do { \ 796 int __d0, __d1; \ 797 __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ 798 "movw %4,%%dx\n\t" \ 799 "movl %%eax,%0\n\t" \ 800 "movl %%edx,%1" \ 801 :"=m" (*((long *) (gate_addr))), \ 802 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \ 803 :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ 804 "3" ((char *) (addr)),"2" (__KERNEL_CS << 16)); \ 805 } while (0) 806 807 808 /* 809 * This needs to use 'idt_table' rather than 'idt', and 810 * thus use the _nonmapped_ version of the IDT, as the 811 * Pentium F0 0F bugfix can have resulted in the mapped 812 * IDT being write-protected. 
813 */ 814 void set_intr_gate(unsigned int n, void *addr) 815 { 816 _set_gate(idt_table+n,14,0,addr); 817 } 818 819 static void __init set_trap_gate(unsigned int n, void *addr) 820 { 821 _set_gate(idt_table+n,15,0,addr); 822 } 823 824 static void __init set_system_gate(unsigned int n, void *addr) 825 { 826 _set_gate(idt_table+n,15,3,addr); 827 } 828 829 static void __init set_call_gate(void *a, void *addr) 830 { 831 _set_gate(a,12,3,addr); 832 } 833 834 #define _set_seg_desc(gate_addr,type,dpl,base,limit) {\ 835 *((gate_addr)+1) = ((base) & 0xff000000) | \ 836 (((base) & 0x00ff0000)>>16) | \ 837 ((limit) & 0xf0000) | \ 838 ((dpl)<<13) | \ 839 (0x00408000) | \ 840 ((type)<<8); \ 841 *(gate_addr) = (((base) & 0x0000ffff)<<16) | \ 842 ((limit) & 0x0ffff); } 843 844 #define _set_tssldt_desc(n,addr,limit,type) \ 845 __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ 846 "movw %%ax,2(%2)\n\t" \ 847 "rorl $16,%%eax\n\t" \ 848 "movb %%al,4(%2)\n\t" \ 849 "movb %4,5(%2)\n\t" \ 850 "movb $0,6(%2)\n\t" \ 851 "movb %%ah,7(%2)\n\t" \ 852 "rorl $16,%%eax" \ 853 : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type)) 854 855 void set_tss_desc(unsigned int n, void *addr) 856 { 857 _set_tssldt_desc(gdt_table+__TSS(n), (int)addr, 235, 0x89); 858 } 859 860 void set_ldt_desc(unsigned int n, void *addr, unsigned int size) 861 { 862 _set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82); 863 } 864 865 #ifdef CONFIG_X86_VISWS_APIC 866 867 /* 868 * On Rev 005 motherboards legacy device interrupt lines are wired directly 869 * to Lithium from the 307. But the PROM leaves the interrupt type of each 870 * 307 logical device set appropriate for the 8259. Later we'll actually use 871 * the 8259, but for now we have to flip the interrupt types to 872 * level triggered, active lo as required by Lithium. 873 */ 874 875 #define REG 0x2e /* The register to read/write */ 876 #define DEV 0x07 /* Register: Logical device select */ 877 #define VAL 0x2f /* The value to read/write */ 878 879 static void 880 superio_outb(int dev, int reg, int val) 881 { 882 outb(DEV, REG); 883 outb(dev, VAL); 884 outb(reg, REG); 885 outb(val, VAL); 886 } 887 888 static int __attribute__ ((unused)) 889 superio_inb(int dev, int reg) 890 { 891 outb(DEV, REG); 892 outb(dev, VAL); 893 outb(reg, REG); 894 return inb(VAL); 895 } 896 897 #define FLOP 3 /* floppy logical device */ 898 #define PPORT 4 /* parallel logical device */ 899 #define UART5 5 /* uart2 logical device (not wired up) */ 900 #define UART6 6 /* uart1 logical device (THIS is the serial port!) */ 901 #define IDEST 0x70 /* int. destination (which 307 IRQ line) reg. 

static void
superio_init(void)
{
	if (visws_board_type == VISWS_320 && visws_board_rev == 5) {
		superio_outb(UART6, IDEST, 0);	/* 0 means no intr propagated */
		printk("SGI 320 rev 5: disabling 307 uart1 interrupt\n");
	}
}

static void
lithium_init(void)
{
	set_fixmap(FIX_LI_PCIA, LI_PCI_A_PHYS);
	printk("Lithium PCI Bridge A, Bus Number: %d\n",
				li_pcia_read16(LI_PCI_BUSNUM) & 0xff);
	set_fixmap(FIX_LI_PCIB, LI_PCI_B_PHYS);
	printk("Lithium PCI Bridge B (PIIX4), Bus Number: %d\n",
				li_pcib_read16(LI_PCI_BUSNUM) & 0xff);

	/* XXX blindly enables all interrupts */
	li_pcia_write16(LI_PCI_INTEN, 0xffff);
	li_pcib_write16(LI_PCI_INTEN, 0xffff);
}

static void
cobalt_init(void)
{
	/*
	 * On normal SMP PC this is used only with SMP, but we have to
	 * use it and set it up here to start the Cobalt clock
	 */
	set_fixmap(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
	printk("Local APIC ID %lx\n", apic_read(APIC_ID));
	printk("Local APIC Version %lx\n", apic_read(APIC_LVR));

	set_fixmap(FIX_CO_CPU, CO_CPU_PHYS);
	printk("Cobalt Revision %lx\n", co_cpu_read(CO_CPU_REV));

	set_fixmap(FIX_CO_APIC, CO_APIC_PHYS);
	printk("Cobalt APIC ID %lx\n", co_apic_read(CO_APIC_ID));

	/* Enable Cobalt APIC being careful to NOT change the ID! */
	co_apic_write(CO_APIC_ID, co_apic_read(CO_APIC_ID)|CO_APIC_ENABLE);

	printk("Cobalt APIC enabled: ID reg %lx\n", co_apic_read(CO_APIC_ID));
}
#endif

void __init trap_init(void)
{
#ifdef CONFIG_EISA
	if (isa_readl(0x0FFFD9) == 'E'+('I'<<8)+('S'<<16)+('A'<<24))
		EISA_bus = 1;
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_trap_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_gate(3,&int3);	/* int3-5 can be called from all */
	set_system_gate(4,&overflow);
	set_system_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	set_trap_gate(8,&double_fault);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
	set_trap_gate(18,&machine_check);
	set_trap_gate(19,&simd_coprocessor_error);

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/*
	 * default LDT is a single-entry callgate to lcall7 for iBCS
	 * and a callgate to lcall27 for Solaris/x86 binaries
	 */
	set_call_gate(&default_ldt[0],lcall7);
	set_call_gate(&default_ldt[4],lcall27);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

#ifdef CONFIG_X86_VISWS_APIC
	superio_init();
	lithium_init();
	cobalt_init();
#endif
}
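
/*
 * Usage note (added commentary, not in the original source): the only
 * IDT entries above installed with DPL 3 - int3, overflow, bounds and
 * SYSCALL_VECTOR - are the ones user space may raise directly, e.g. a
 * libc system call stub executing "int $0x80".  Any other vector
 * invoked from ring 3 takes a general protection fault instead, since
 * set_trap_gate()/set_intr_gate() leave the gate DPL at 0.
 */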