Commit MetaInfo

Revision: 1e09177acae32a61586af26d83ca5ef591cdcaf5 (tree)
Time: 2018-07-11 08:18:09
Author: Linus Torvalds <torvalds@linu...>
Committer: Linus Torvalds

Log Message

Merge tag 'mips_fixes_4.18_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Paul Burton:

"A couple more MIPS fixes for 4.18:
- Use async IPIs for arch_trigger_cpumask_backtrace() in order to
avoid warnings & deadlocks, fixing a problem introduced in v3.19
with the fix trivial to backport as far as v4.9.
- Fix ioremap()'s MMU/TLB backed path to avoid spuriously rejecting
valid requests due to an incorrect belief that the memory region is
backed by potentially-in-use RAM. This fixes a regression in v4.2"

* tag 'mips_fixes_4.18_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:

MIPS: Fix ioremap() RAM check
MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
MIPS: Call dump_stack() from show_regs()
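
For context on the first fix: arch_trigger_cpumask_backtrace() is the MIPS implementation of the hook behind generic helpers such as trigger_all_cpu_backtrace() from <linux/nmi.h>, used for instance by the SysRq-L handler. A minimal sketch of a caller follows; example_dump_all_cpus() is a hypothetical name, not part of this commit:

#include <linux/nmi.h>
#include <linux/printk.h>

static void example_dump_all_cpus(void)
{
	/*
	 * Ask every online CPU to print a backtrace; on MIPS this now
	 * goes through the async-IPI raise_backtrace() path shown in
	 * the diff below.
	 */
	if (!trigger_all_cpu_backtrace())
		pr_info("no arch support for cpumask backtraces\n");
}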

Change Summary

Incremental Difference

--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
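
The old path called smp_call_function_many() and waited for the targets, which warns when interrupts are disabled and can deadlock when a target CPU cannot service the IPI; the new path queues a per-CPU call_single_data_t asynchronously. The one rule the busy mask enforces is that a csd must never be re-queued while a previous submission is still pending. A stripped-down sketch of that pattern, with hypothetical example_* names and kernel context assumed:

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(call_single_data_t, example_csd);
static struct cpumask example_csd_busy;

static void example_ipi_handler(void *info)
{
	/* ... per-CPU work runs here in IPI context ... */
	cpumask_clear_cpu(smp_processor_id(), &example_csd_busy);
}

static void example_kick(int cpu)
{
	call_single_data_t *csd = &per_cpu(example_csd, cpu);

	/* Skip the CPU if its csd from a previous kick is still pending. */
	if (cpumask_test_and_set_cpu(cpu, &example_csd_busy))
		return;

	csd->func = example_ipi_handler;
	smp_call_function_single_async(cpu, csd);
}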
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
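
This change supports the new backtrace path: the generic nmi_cpu_backtrace() prints via show_regs() whenever it has register state and only falls back to dump_stack(), so show_regs() on MIPS must now emit the call trace itself. A simplified sketch of that caller's behaviour (not this commit's code):

#include <linux/ptrace.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>

static void example_print_backtrace(struct pt_regs *regs)
{
	if (regs)
		show_regs(regs);	/* now includes the stack dump on MIPS */
	else
		dump_stack();
}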
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
 	struct vm_struct * area;
-	unsigned long offset;
 	phys_addr_t last_addr;
 	void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
 	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
 	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
 	}
 
 	/*
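
walk_system_ram_range() invokes the callback only for PFN chunks registered as "System RAM" and stops as soon as the callback returns non-zero, so __ioremap_check_ram() rejects a request exactly when some page in the range is valid, RAM-backed, and not reserved. A hedged sketch of the driver-visible effect; example_map() and its arguments are hypothetical:

#include <linux/io.h>

static void __iomem *example_map(phys_addr_t mmio_base, size_t len)
{
	/*
	 * Device MMIO maps as before; a range backed by allocatable RAM
	 * now fails with NULL plus a one-shot WARN, instead of relying
	 * on the old, imprecise virt_to_phys(high_memory) test that
	 * could spuriously reject valid requests.
	 */
	return ioremap(mmio_base, len);
}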