
Commit 78fd584

AKASHI Takahiro authored and ctmarinas committed
arm64: kdump: implement machine_crash_shutdown()
Primary kernel calls machine_crash_shutdown() to shut down non-boot cpus and save registers' status in per-cpu ELF notes before starting crash dump kernel. See kernel_kexec(). Even if not all secondary cpus have shut down, we do kdump anyway.

As we don't have to make non-boot (crashed) cpus offline (to preserve correct status of cpus at crash dump) before shutting down, this patch also adds a variant of smp_send_stop().

Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 254a41c commit 78fd584
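
For context, the arch hooks touched by this commit are driven from the generic kexec crash path; the following is a simplified sketch of that caller (not part of this patch; locking, vmcoreinfo details and error handling elided) showing where crash_setup_regs() and machine_crash_shutdown() fit:

/*
 * Simplified sketch of the generic crash-kexec caller in kernel/kexec_core.c.
 * Not part of this patch; details elided for illustration only.
 */
void __crash_kexec(struct pt_regs *regs)
{
	if (kexec_crash_image) {
		struct pt_regs fixed_regs;

		crash_setup_regs(&fixed_regs, regs);	/* arch hook, filled in below */
		crash_save_vmcoreinfo();
		machine_crash_shutdown(&fixed_regs);	/* stops secondaries, saves per-cpu notes */
		machine_kexec(kexec_crash_image);	/* jumps to the crash dump kernel */
	}
}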

File tree

5 files changed: +167 -6 lines

arch/arm64/include/asm/hardirq.h

+1 -1

@@ -20,7 +20,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 6
+#define NR_IPI 7
 
 typedef struct {
 	unsigned int __softirq_pending;

arch/arm64/include/asm/kexec.h

+41 -1

@@ -40,7 +40,47 @@
 static inline void crash_setup_regs(struct pt_regs *newregs,
 				    struct pt_regs *oldregs)
 {
-	/* Empty routine needed to avoid build errors. */
+	if (oldregs) {
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	} else {
+		u64 tmp1, tmp2;
+
+		__asm__ __volatile__ (
+			"stp x0, x1, [%2, #16 * 0]\n"
+			"stp x2, x3, [%2, #16 * 1]\n"
+			"stp x4, x5, [%2, #16 * 2]\n"
+			"stp x6, x7, [%2, #16 * 3]\n"
+			"stp x8, x9, [%2, #16 * 4]\n"
+			"stp x10, x11, [%2, #16 * 5]\n"
+			"stp x12, x13, [%2, #16 * 6]\n"
+			"stp x14, x15, [%2, #16 * 7]\n"
+			"stp x16, x17, [%2, #16 * 8]\n"
+			"stp x18, x19, [%2, #16 * 9]\n"
+			"stp x20, x21, [%2, #16 * 10]\n"
+			"stp x22, x23, [%2, #16 * 11]\n"
+			"stp x24, x25, [%2, #16 * 12]\n"
+			"stp x26, x27, [%2, #16 * 13]\n"
+			"stp x28, x29, [%2, #16 * 14]\n"
+			"mov %0, sp\n"
+			"stp x30, %0, [%2, #16 * 15]\n"
+
+			"/* faked current PSTATE */\n"
+			"mrs %0, CurrentEL\n"
+			"mrs %1, SPSEL\n"
+			"orr %0, %0, %1\n"
+			"mrs %1, DAIF\n"
+			"orr %0, %0, %1\n"
+			"mrs %1, NZCV\n"
+			"orr %0, %0, %1\n"
+			/* pc */
+			"adr %1, 1f\n"
+		"1:\n"
+			"stp %1, %0, [%2, #16 * 16]\n"
+			: "=&r" (tmp1), "=&r" (tmp2)
+			: "r" (newregs)
+			: "memory"
+		);
+	}
 }
 
 #if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)

arch/arm64/include/asm/smp.h

+3 -0

@@ -148,6 +148,9 @@ static inline void cpu_panic_kernel(void)
  */
 bool cpus_are_stuck_in_kernel(void);
 
+extern void smp_send_crash_stop(void);
+extern bool smp_crash_stop_failed(void);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */

arch/arm64/kernel/machine_kexec.c

+54 -4

@@ -9,6 +9,9 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/kexec.h>
 #include <linux/page-flags.h>
 #include <linux/smp.h>
@@ -143,11 +146,15 @@ void machine_kexec(struct kimage *kimage)
 {
 	phys_addr_t reboot_code_buffer_phys;
 	void *reboot_code_buffer;
+	bool in_kexec_crash = (kimage == kexec_crash_image);
+	bool stuck_cpus = cpus_are_stuck_in_kernel();
 
 	/*
 	 * New cpus may have become stuck_in_kernel after we loaded the image.
 	 */
-	BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));
+	BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
+	WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
+		"Some CPUs may be stale, kdump will be unreliable.\n");
 
 	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
 	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
@@ -199,15 +206,58 @@ void machine_kexec(struct kimage *kimage)
 	 * relocation is complete.
 	 */
 
-	cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
-		kimage->start, 0);
+	cpu_soft_restart(kimage != kexec_crash_image,
+		reboot_code_buffer_phys, kimage->head, kimage->start, 0);
 
 	BUG(); /* Should never get here. */
 }
 
+static void machine_kexec_mask_interrupts(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+
+	for_each_irq_desc(i, desc) {
+		struct irq_chip *chip;
+		int ret;
+
+		chip = irq_desc_get_chip(desc);
+		if (!chip)
+			continue;
+
+		/*
+		 * First try to remove the active state. If this
+		 * fails, try to EOI the interrupt.
+		 */
+		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+		    chip->irq_eoi)
+			chip->irq_eoi(&desc->irq_data);
+
+		if (chip->irq_mask)
+			chip->irq_mask(&desc->irq_data);
+
+		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+			chip->irq_disable(&desc->irq_data);
+	}
+}
+
+/**
+ * machine_crash_shutdown - shutdown non-crashing cpus and save registers
+ */
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	/* Empty routine needed to avoid build errors. */
+	local_irq_disable();
+
+	/* shutdown non-crashing cpus */
+	smp_send_crash_stop();
+
+	/* for crashing cpu */
+	crash_save_cpu(regs, smp_processor_id());
+	machine_kexec_mask_interrupts();
+
+	pr_info("Starting crashdump kernel...\n");
 }
 
 void arch_kexec_protect_crashkres(void)

arch/arm64/kernel/smp.c

+68 -0

@@ -39,6 +39,7 @@
 #include <linux/completion.h>
 #include <linux/of.h>
 #include <linux/irq_work.h>
+#include <linux/kexec.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -76,6 +77,7 @@ enum ipi_msg_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CPU_STOP,
+	IPI_CPU_CRASH_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
 	IPI_WAKEUP
@@ -756,6 +758,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 	S(IPI_WAKEUP, "CPU wake-up interrupts"),
@@ -830,6 +833,29 @@ static void ipi_cpu_stop(unsigned int cpu)
 		cpu_relax();
 }
 
+#ifdef CONFIG_KEXEC_CORE
+static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
+#endif
+
+static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+#ifdef CONFIG_KEXEC_CORE
+	crash_save_cpu(regs, cpu);
+
+	atomic_dec(&waiting_for_crash_ipi);
+
+	local_irq_disable();
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpu_ops[cpu]->cpu_die)
+		cpu_ops[cpu]->cpu_die(cpu);
+#endif
+
+	/* just in case */
+	cpu_park_loop();
+#endif
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -860,6 +886,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;
 
+	case IPI_CPU_CRASH_STOP:
+		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
+			irq_enter();
+			ipi_cpu_crash_stop(cpu, regs);
+
+			unreachable();
+		}
+		break;
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
 		irq_enter();
@@ -932,6 +967,39 @@ void smp_send_stop(void)
 			cpumask_pr_args(cpu_online_mask));
 }
 
+#ifdef CONFIG_KEXEC_CORE
+void smp_send_crash_stop(void)
+{
+	cpumask_t mask;
+	unsigned long timeout;
+
+	if (num_online_cpus() == 1)
+		return;
+
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+
+	pr_crit("SMP: stopping secondary CPUs\n");
+	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+		udelay(1);
+
+	if (atomic_read(&waiting_for_crash_ipi) > 0)
+		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(&mask));
+}
+
+bool smp_crash_stop_failed(void)
+{
+	return (atomic_read(&waiting_for_crash_ipi) > 0);
+}
+#endif
+
 /*
  * not supported here
  */
