///////////////////////////////////////////////////////////////////////////////
/*
 * smp_init() - bring the application processors (APs) fully online.
 *
 * Pseudocode walkthrough (inlined callee bodies shown in braces).
 * Sequence: boot every secondary CPU, release them from their holding
 * loop by setting smp_commenced, then spin until each AP has run
 * init_idle() and cleared its own bit in wait_init_idle.
 */
static void __init smp_init(void)
{
/* Get other processors into their bootup holding patterns. */
smp_boot_cpus();
/* Snapshot of online CPUs; each AP clears its bit once its idle task is set up. */
wait_init_idle = cpu_online_map;
clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */
smp_threads_ready=1;
/* smp_commence() shown inlined for exposition. */
smp_commence() {
/* Lets the callins below out of their loop. */
Dprintk("Setting commenced=1, go go go\n");
wmb(); /* ensure prior stores are visible to APs before the flag flips */
atomic_set(&smp_commenced,1);
}
/* Wait for the other cpus to set up their idle processes */
printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle);
while (wait_init_idle) {
cpu_relax(); // i.e. "rep;nop"
barrier(); /* forces wait_init_idle to be re-read each iteration */
}
printk("All processors have done init_idle\n");
}
///////////////////////////////////////////////////////////////////////////////
/*
 * smp_boot_cpus() - set up the boot CPU's SMP state, then start each AP.
 *
 * Pseudocode walkthrough (inlined callee bodies shown in braces).
 * Initializes CPU<->APIC-ID maps, registers the boot CPU as online,
 * switches the interrupt hardware into APIC mode, then walks
 * phys_cpu_present_map calling do_boot_cpu() for every other CPU.
 */
void __init smp_boot_cpus(void)
{
// ... something not very interesting :-)
/* Initialize the logical to physical CPU number mapping
 * and the per-CPU profiling router/multiplier */
prof_counter[0..NR_CPUS-1] = 0;
prof_old_multiplier[0..NR_CPUS-1] = 0;
prof_multiplier[0..NR_CPUS-1] = 0;
/* Start with all map entries invalid (-1 = "no CPU has this APIC id"). */
init_cpu_to_apicid() {
physical_apicid_2_cpu[0..MAX_APICID-1] = -1;
logical_apicid_2_cpu[0..MAX_APICID-1] = -1;
cpu_2_physical_apicid[0..NR_CPUS-1] = 0;
cpu_2_logical_apicid[0..NR_CPUS-1] = 0;
}
/* Setup boot CPU information */
smp_store_cpu_info(0); /* Final full version of the data */
printk("CPU%d: ", 0);
print_cpu_info(&cpu_data[0]);
/* We have the boot CPU online for sure. */
set_bit(0, &cpu_online_map);
/* Read our logical APIC id straight from the local APIC's LDR register. */
boot_cpu_logical_apicid = logical_smp_processor_id() {
GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
/* Record the boot CPU (number 0) in both directions of the mapping. */
map_cpu_to_boot_apicid(0, boot_cpu_apicid) {
physical_apicid_2_cpu[boot_cpu_apicid] = 0;
cpu_2_physical_apicid[0] = boot_cpu_apicid;
}
global_irq_holder = 0;
current->processor = 0;
init_idle(); // will clear corresponding bit in wait_init_idle
smp_tune_scheduling();
// ... some conditions checked
connect_bsp_APIC(); // enable APIC mode if used to be PIC mode
setup_local_APIC();
/* Sanity check: the APIC id we read back must match what we booted with. */
if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
BUG();
/* Scan the CPU present map and fire up the other CPUs
 * via do_boot_cpu() */
Dprintk("CPU present map: %lx\n", phys_cpu_present_map);
for (bit = 0; bit < NR_CPUS; bit++) {
apicid = cpu_present_to_apicid(bit);
/* Don't even attempt to start the boot CPU! */
if (apicid == boot_cpu_apicid)
continue;
/* Skip CPUs that the MP tables do not list as present. */
if (!(phys_cpu_present_map & (1 << bit)))
continue;
/* Honor the "maxcpus=" boot parameter limit. */
if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
continue;
do_boot_cpu(apicid);
/* Make sure we unmap all failed CPUs */
/* boot_apicid_to_cpu() == -1 means do_boot_cpu() unmapped it on failure. */
if ((boot_apicid_to_cpu(apicid) == -1) &&
(phys_cpu_present_map & (1 << bit)))
printk("CPU #%d not responding - cannot use it.\n", apicid);
}
// ... SMP BogoMIPS
// ... B stepping processor warning
// ... HyperThreading handling
/* Set up all local APIC timers in the system */
setup_APIC_clocks();
/* Synchronize the TSC with the AP */
/* cpucount != 0 means at least one AP actually came up. */
if (cpu_has_tsc && cpucount)
synchronize_tsc_bp();
smp_done:
zap_low_mappings();
}
///////////////////////////////////////////////////////////////////////////////
/*
 * do_boot_cpu() - start one application processor, identified by @apicid.
 *
 * Pseudocode walkthrough (inlined callee bodies shown in braces).
 * Three phases, in order: (1) hand-build an idle task for the AP,
 * (2) copy the real-mode trampoline and plant its CS:IP in the warm-reset
 * vector at 0:467, (3) wake the AP with the INIT/STARTUP IPI sequence and
 * wait for it to call in. On failure all bookkeeping is rolled back.
 */
static void __init do_boot_cpu (int apicid)
{
cpu = ++cpucount;
// 1. prepare "idle process" task struct for next AP
/* We can't use kernel_thread since we must avoid to
 * reschedule the child. */
if (fork_by_hand() < 0)
panic("failed fork for CPU %d", cpu);
/* We remove it from the pidhash and the runqueue
 * once we got the process: */
/* fork_by_hand()'s child is the newest task, i.e. init_task.prev_task. */
idle = init_task.prev_task;
if (!idle)
panic("No idle process for CPU %d", cpu);
/* we schedule the first task manually */
idle->processor = cpu;
idle->cpus_runnable = 1 << cpu; // only on this AP!
/* Register the new CPU number in both directions of the APIC-id map. */
map_cpu_to_boot_apicid(cpu, apicid) {
physical_apicid_2_cpu[apicid] = cpu;
cpu_2_physical_apicid[cpu] = apicid;
}
/* The AP will begin kernel-mode execution at start_secondary(). */
idle->thread.eip = (unsigned long) start_secondary;
del_from_runqueue(idle);
unhash_process(idle);
init_tasks[cpu] = idle;
// 2. prepare stack and code (CS:IP) for next AP
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline() {
memcpy(trampoline_base, trampoline_data,
trampoline_end - trampoline_data);
/* trampoline_base was reserved in
 * start_kernel() -> setup_arch() -> smp_alloc_memory(),
 * and will be shared by all APs (one by one) */
return virt_to_phys(trampoline_base);
}
/* So we see what's up */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
/* Point the AP's initial stack into the idle task's own pages. */
stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle);
/* this value is used by next AP when it executes
 * "lss stack_start,%esp" in
 * linux/arch/i386/kernel/head.S:startup_32(). */
/* This grunge runs the startup process for
 * the targeted processor. */
atomic_set(&init_deasserted, 0);
Dprintk("Setting warm reset code and vector.\n");
/* NOTE(review): presumably writes shutdown code 0xA to CMOS reg 0xF
 * ("jump via 40:67"), so an INIT-reset AP takes the warm-reset path. */
CMOS_WRITE(0xa, 0xf);
local_flush_tlb();
Dprintk("1.\n");
/* Warm-reset vector segment:offset - segment = physical address >> 4 ... */
*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
Dprintk("2.\n");
/* ... and offset = low 4 bits (zero when start_eip is page-aligned). */
*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
Dprintk("3.\n");
// we have setup 0:467 to start_eip (trampoline_base)
// 3. kick AP to run (AP gets CS:IP from 0:467)
// Starting actual IPI sequence...
boot_error = wakeup_secondary_via_INIT(apicid, start_eip);
if (!boot_error) { // looks OK
/* allow APs to start initializing. */
set_bit(cpu, &cpu_callout_map);
/* ... Wait 5s total for a response */
// bit cpu in cpu_callin_map is set by AP in smp_callin()
if (test_bit(cpu, &cpu_callin_map)) {
print_cpu_info(&cpu_data[cpu]);
} else {
boot_error= 1;
// marker 0xA5 set by AP in trampoline_data()
/* Distinguish "trampoline ran but AP hung" from "AP never started". */
if (*((volatile unsigned char *)phys_to_virt(8192)) == 0xA5)
/* trampoline started but... */
printk("Stuck ??\n");
else
/* trampoline code not run */
printk("Not responding.\n");
}
}
if (boot_error) {
/* Try to put things back the way they were before ... */
unmap_cpu_to_boot_apicid(cpu, apicid);
clear_bit(cpu, &cpu_callout_map); /* set in do_boot_cpu() */
clear_bit(cpu, &cpu_initialized); /* set in cpu_init() */
clear_bit(cpu, &cpu_online_map); /* set in smp_callin() */
cpucount--; /* undo the increment at the top; the slot is reusable */
}
/* mark "stuck" area as not stuck */
*((volatile unsigned long *)phys_to_virt(8192)) = 0;
}