/*
 *	from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD: src/sys/i386/isa/apic_vector.s,v 1.47.2.5 2001/09/01 22:33:38 tegge Exp $
 */

#if 0
#include "opt_auto_eoi.h"
#endif

#include
#include
#include
#include
#include
#include
#include

#include "assym.s"

#include "apicreg.h"
#include
#include

#ifdef foo
/* convert an absolute IRQ# into bitmask */
#define IRQ_LBIT(irq_num)	(1UL << (irq_num & 0x3f))
#endif

#define IRQ_SBITS(irq_num)	((irq_num) & 0x3f)

/* convert an absolute IRQ# into gd_ipending index */
#define IRQ_LIDX(irq_num)	((irq_num) >> 6)

#define MPLOCKED	lock ;

#define APIC_PUSH_FRAME_TFRIP						\
	PUSH_FRAME_TFRIP ;	/* 15 regs + space for 5 extras */	\
	movq	$0,TF_XFLAGS(%rsp) ;					\
	movq	$0,TF_TRAPNO(%rsp) ;					\
	movq	$0,TF_ADDR(%rsp) ;					\
	movq	$0,TF_FLAGS(%rsp) ;					\
	movq	$0,TF_ERR(%rsp) ;					\
	cld ;								\

/*
 * JG stale? Warning: POP_FRAME can only be used if there is no chance of a
 * segment register being changed (e.g. by procfs), which is why syscalls
 * have to use doreti.
 */
#define APIC_POP_FRAME(lastinsn)					\
	POP_FRAME(lastinsn)						\

#define IOAPICADDR(irq_num)						\
	CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_ADDR
#define REDIRIDX(irq_num)						\
	CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_IDX
#define IOAPICFLAGS(irq_num)						\
	CNAME(ioapic_irqs) + IOAPIC_IRQI_SIZE * (irq_num) + IOAPIC_IRQI_FLAGS

#define MASK_IRQ(irq_num)						\
	testl	$IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;	\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;	\
					/* set the mask bit */		\
	movq	IOAPICADDR(irq_num), %rcx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%rcx) ;			/* write the index */	\
	orl	$IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* set the mask */	\
7: ;					/* already masked */		\

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IOAPIC_IRQI_FLAG_LEVEL, IOAPICFLAGS(irq_num) ;		\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9: ;									\

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	cmpl	$0,%eax ;						\
	jnz	8f ;							\
	IOAPIC_IMASK_LOCK ;						\
	testl	$IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;	\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IOAPIC_IRQI_FLAG_MASKED, IOAPICFLAGS(irq_num) ;	\
					/* clear mask bit */		\
	movq	IOAPICADDR(irq_num),%rcx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax,(%rcx) ;			/* write the index */	\
	andl	$~IOART_INTMASK,IOAPIC_WINDOW(%rcx) ;/* clear the mask */ \
7: ;									\
	IOAPIC_IMASK_UNLOCK ;						\
8: ;									\

/*
 * Interrupt call handlers run in the following sequence:
 *
 * - Push the trap frame required by doreti
 * - Mask the interrupt and reenable its source
 * - If we cannot take the interrupt set its ipending bit and
 *   doreti.
 * - If we can take the interrupt clear its ipending bit,
 *   call the handler, then unmask and doreti.
 *
 * YYY can cache gd base pointer instead of using hidden %fs prefixes.
 */
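/*
 * Illustrative sketch only (not part of the original source): the
 * INTR_HANDLER() expansion below behaves roughly like the following C,
 * where "gd" is the per-cpu globaldata and "td" is the current thread.
 * Helper names such as mask_level_irq()/unmask_irq() are hypothetical
 * stand-ins for the MASK_LEVEL_IRQ()/UNMASK_IRQ() macros above:
 *
 *	mask_level_irq(irq);			// level INTs stay masked
 *	lapic_eoi();
 *	if (td->td_nest_count || td->td_critcount) {
 *		// can't run the handler now: mark it pending and doreti
 *		gd->gd_ipending[irq >> 6] |= 1UL << (irq & 0x3f);
 *		gd->gd_reqflags |= RQF_INTPEND;
 *	} else {
 *		// safe to dispatch: clear the pending bit, run the handler
 *		// in a critical section, unmask only if the handler says so
 *		gd->gd_ipending[irq >> 6] &= ~(1UL << (irq & 0x3f));
 *		++td->td_critcount;
 *		ret = ithread_fast_handler(frame);	// returns 0 to unmask
 *		--td->td_critcount;
 *		if (ret == 0)
 *			unmask_irq(irq);
 *	}
 *	// both paths exit through doreti
 */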
#define INTR_HANDLER(irq_num)						\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(ioapic_intr##irq_num) ;						\
	APIC_PUSH_FRAME_TFRIP ;						\
	FAKE_MCOUNT(TF_RIP(%rsp)) ;					\
	IOAPIC_IMASK_LOCK ;						\
	MASK_LEVEL_IRQ(irq_num) ;					\
	movq	lapic_eoi, %rax ;					\
	IOAPIC_IMASK_UNLOCK ;						\
	callq	*%rax ;							\
	movq	PCPU(curthread),%rbx ;					\
	testl	$-1,TD_NEST_COUNT(%rbx) ;				\
	jne	1f ;							\
	testl	$-1,TD_CRITCOUNT(%rbx) ;				\
	je	2f ;							\
1: ;									\
	/* in critical section, make interrupt pending */		\
	/* set the pending bit and return, leave interrupt masked */	\
	movq	$1,%rcx ;						\
	shlq	$IRQ_SBITS(irq_num),%rcx ;				\
	movq	$IRQ_LIDX(irq_num),%rdx ;				\
	orq	%rcx,PCPU_E8(ipending,%rdx) ;				\
	orl	$RQF_INTPEND,PCPU(reqflags) ;				\
	jmp	5f ;							\
2: ;									\
	/* clear pending bit, run handler */				\
	movq	$1,%rcx ;						\
	shlq	$IRQ_SBITS(irq_num),%rcx ;				\
	notq	%rcx ;							\
	movq	$IRQ_LIDX(irq_num),%rdx ;				\
	andq	%rcx,PCPU_E8(ipending,%rdx) ;				\
	pushq	$irq_num ;		/* trapframe -> intrframe */	\
	movq	%rsp, %rdi ;		/* pass frame by reference */	\
	incl	TD_CRITCOUNT(%rbx) ;					\
	sti ;								\
	call	ithread_fast_handler ;	/* returns 0 to unmask */	\
	cli ;				/* interlock avoid stacking */	\
	decl	TD_CRITCOUNT(%rbx) ;					\
	addq	$8, %rsp ;		/* intrframe -> trapframe */	\
	UNMASK_IRQ(irq_num) ;						\
5: ;									\
	MEXITCOUNT ;							\
	jmp	doreti ;						\

/*
 * Handle "spurious INTerrupts".
 *
 * NOTE: This is different than the "spurious INTerrupt" generated by an
 *	 8259 PIC for missing INTs.  See the APIC documentation for details.
 *	 This routine should NOT do an 'EOI' cycle.
 *
 * NOTE: Even though we don't do anything here we must still swapgs if
 *	 coming from a user frame in case the iretq faults... just use
 *	 the nominal APIC_PUSH_FRAME sequence to get it done.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xspuriousint
Xspuriousint:
	APIC_PUSH_FRAME_TFRIP
	/* No EOI cycle used here */

	FAKE_MCOUNT(TF_RIP(%rsp))
	MEXITCOUNT
	APIC_POP_FRAME(jmp doreti_iret)

/*
 * Handle TLB shootdowns.
 *
 * NOTE: interrupts are left disabled.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xinvltlb
Xinvltlb:
	APIC_PUSH_FRAME_TFRIP
	movq	lapic_eoi, %rax
	callq	*%rax			/* End Of Interrupt to APIC */
	FAKE_MCOUNT(TF_RIP(%rsp))
	incl	PCPU(cnt) + V_IPI
	movq	PCPU(curthread),%rbx
	incl	PCPU(intr_nesting_level)
	incl	TD_CRITCOUNT(%rbx)
	subq	$8,%rsp			/* make same as interrupt frame */
	movq	%rsp,%rdi		/* pass frame by reference */
	orl	$RQF_XINVLTLB,PCPU(reqflags)	/* HVM interlock */
	call	smp_inval_intr		/* called w/interrupts disabled */
	addq	$8,%rsp			/* turn into trapframe */
	decl	TD_CRITCOUNT(%rbx)
	decl	PCPU(intr_nesting_level)
	MEXITCOUNT
	/*APIC_POP_FRAME*/
	jmp	doreti			/* doreti b/c intrs enabled */

/*
 * Handle sniffs - sniff %rip and %rsp.
 */
	.text
	SUPERALIGN_TEXT
	.globl	Xsniff
Xsniff:
	APIC_PUSH_FRAME_TFRIP
	movq	lapic_eoi, %rax
	callq	*%rax			/* End Of Interrupt to APIC */
	FAKE_MCOUNT(TF_RIP(%rsp))
	incl	PCPU(cnt) + V_IPI
	movq	%rsp,%rdi
	call	CNAME(hard_sniff)	/* systat -pv and flame sniff */
	MEXITCOUNT
	APIC_POP_FRAME(jmp doreti_iret)

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU,
 *
 *  - We cannot call doreti
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Processes pending IPIQ events while waiting.
 *  - Signals its restart.
 */
	.text
	SUPERALIGN_TEXT
	.globl Xcpustop
Xcpustop:
	APIC_PUSH_FRAME_TFRIP
	movq	lapic_eoi, %rax
	callq	*%rax			/* End Of Interrupt to APIC */

	movl	PCPU(cpuid), %eax
	imull	$PCB_SIZE, %eax
	leaq	CNAME(stoppcbs), %rdi
	addq	%rax, %rdi
	call	CNAME(savectx)		/* Save process context */

	/*
	 * Indicate that we have stopped and loop waiting for permission
	 * to start again.  We must still process IPI events while in a
	 * stopped state.
	 *
	 * Interrupts must remain enabled for non-IPI'd per-cpu interrupts
	 * (e.g. Xtimer, Xinvltlb).
	 */
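	/*
	 * Illustrative sketch only (not part of the original source): the
	 * stop/restart handshake below is roughly the following C, with the
	 * cpumask helper macros standing in for the per-64-bit-word locked
	 * operations that the assembly spells out by hand:
	 *
	 *	ATOMIC_CPUMASK_ORMASK(stopped_cpus, gd->gd_cpumask);
	 *	while (!CPUMASK_TESTMASK(started_cpus, gd->gd_cpumask)) {
	 *		// stay stopped, but keep servicing IPI/timer events
	 *		gd->gd_reqflags &= ~RQF_IPIQ;
	 *		lwkt_smp_stopped();
	 *		cpu_pause();
	 *	}
	 */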
#if CPUMASK_ELEMENTS != 4
#error "assembly incompatible with cpumask_t"
#endif
	movq	PCPU(cpumask)+0,%rax	/* stopped_cpus |= 1 << cpuid */
	MPLOCKED orq	%rax, stopped_cpus+0
	movq	PCPU(cpumask)+8,%rax
	MPLOCKED orq	%rax, stopped_cpus+8
	movq	PCPU(cpumask)+16,%rax
	MPLOCKED orq	%rax, stopped_cpus+16
	movq	PCPU(cpumask)+24,%rax
	MPLOCKED orq	%rax, stopped_cpus+24

	movq	PCPU(curthread),%rbx
	incl	PCPU(intr_nesting_level)
	incl	TD_CRITCOUNT(%rbx)
	sti
1:
	andl	$~RQF_IPIQ,PCPU(reqflags)
	call	lwkt_smp_stopped
	pause
	subq	%rdi,%rdi
	movq	started_cpus+0,%rax	/* while (!(started_cpus & (1<