Diffstat (limited to 'arch/loongarch/kernel')
-rw-r--r--  arch/loongarch/kernel/acpi.c      81
-rw-r--r--  arch/loongarch/kernel/paravirt.c  15
-rw-r--r--  arch/loongarch/kernel/process.c   16
-rw-r--r--  arch/loongarch/kernel/setup.c      3
-rw-r--r--  arch/loongarch/kernel/smp.c        5
-rw-r--r--  arch/loongarch/kernel/traps.c      5
-rw-r--r--  arch/loongarch/kernel/vdso.c       8
7 files changed, 91 insertions, 42 deletions
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index f1a74b80f22c..382a09a7152c 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -58,48 +58,48 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
return ioremap_cache(phys, size);
}
-static int cpu_enumerated = 0;
-
#ifdef CONFIG_SMP
-static int set_processor_mask(u32 id, u32 flags)
+static int set_processor_mask(u32 id, u32 pass)
{
- int nr_cpus;
- int cpu, cpuid = id;
-
- if (!cpu_enumerated)
- nr_cpus = NR_CPUS;
- else
- nr_cpus = nr_cpu_ids;
+ int cpu = -1, cpuid = id;
- if (num_processors >= nr_cpus) {
+ if (num_processors >= NR_CPUS) {
pr_warn(PREFIX "nr_cpus limit of %i reached."
- " processor 0x%x ignored.\n", nr_cpus, cpuid);
+ " processor 0x%x ignored.\n", NR_CPUS, cpuid);
return -ENODEV;
}
+
if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
- else
- cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
-
- if (!cpu_enumerated)
- set_cpu_possible(cpu, true);
- if (flags & ACPI_MADT_ENABLED) {
+ switch (pass) {
+ case 1: /* Pass 1 handle enabled processors */
+ if (cpu < 0)
+ cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
num_processors++;
set_cpu_present(cpu, true);
- __cpu_number_map[cpuid] = cpu;
- __cpu_logical_map[cpu] = cpuid;
- } else
+ break;
+ case 2: /* Pass 2 handle disabled processors */
+ if (cpu < 0)
+ cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
disabled_cpus++;
+ break;
+ default:
+ return cpu;
+ }
+
+ set_cpu_possible(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
return cpu;
}
#endif
static int __init
-acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_core_pic *processor = NULL;
@@ -110,13 +110,30 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en
acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
acpi_core_pic[processor->core_id] = *processor;
- set_processor_mask(processor->core_id, processor->flags);
+ if (processor->flags & ACPI_MADT_ENABLED)
+ set_processor_mask(processor->core_id, 1);
#endif
return 0;
}
static int __init
+acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_core_pic *processor = NULL;
+
+ processor = (struct acpi_madt_core_pic *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+#ifdef CONFIG_SMP
+ if (!(processor->flags & ACPI_MADT_ENABLED))
+ set_processor_mask(processor->core_id, 2);
+#endif
+
+ return 0;
+}
+static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
static int core = 0;
@@ -143,12 +160,14 @@ static void __init acpi_process_madt(void)
}
#endif
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
- acpi_parse_processor, MAX_CORE_PIC);
+ acpi_parse_p1_processor, MAX_CORE_PIC);
+
+ acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+ acpi_parse_p2_processor, MAX_CORE_PIC);
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS);
- cpu_enumerated = 1;
loongson_sysconf.nr_cpus = num_processors;
}
@@ -310,6 +329,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
int nid;
nid = acpi_get_node(handle);
+
+ if (nid != NUMA_NO_NODE)
+ nid = early_cpu_to_node(cpu);
+
if (nid != NUMA_NO_NODE) {
set_cpuid_to_node(physid, nid);
node_set(nid, numa_nodes_parsed);
@@ -324,12 +347,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
{
int cpu;
- cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
- if (cpu < 0) {
+ cpu = cpu_number_map(physid);
+ if (cpu < 0 || cpu >= nr_cpu_ids) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
- return cpu;
+ return -ERANGE;
}
+ num_processors++;
+ set_cpu_present(cpu, true);
acpi_map_cpu2node(handle, cpu, physid);
*pcpu = cpu;
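
Note on the acpi.c change: CPU enumeration now runs the MADT CORE_PIC parser twice, so enabled cores get the low logical IDs (and become present) before disabled, hot-pluggable cores are merely marked possible. Below is a minimal standalone sketch of that two-pass allocation, with toy arrays standing in for the kernel cpumasks and the boot-CPU special case omitted:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS      8
#define MADT_ENABLED 1

struct madt_entry { unsigned int core_id; unsigned int flags; };

static bool cpu_present[NR_CPUS], cpu_possible[NR_CPUS];
static int num_processors, disabled_cpus;

static int first_free(const bool *mask)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (!mask[i])
			return i;
	return -1;
}

static int set_processor_mask(unsigned int id, int pass)
{
	int cpu;

	if (num_processors >= NR_CPUS)
		return -1;

	/* Pass 1 allocates from the present mask, pass 2 from the possible mask. */
	cpu = first_free(pass == 1 ? cpu_present : cpu_possible);
	if (cpu < 0)
		return -1;

	if (pass == 1) {		/* enabled core: becomes present */
		num_processors++;
		cpu_present[cpu] = true;
	} else {			/* disabled core: possible only, hot-added later */
		disabled_cpus++;
	}
	cpu_possible[cpu] = true;
	printf("core 0x%x -> logical cpu %d (pass %d)\n", id, cpu, pass);
	return cpu;
}

int main(void)
{
	struct madt_entry madt[] = { { 0, MADT_ENABLED }, { 1, 0 }, { 2, MADT_ENABLED } };
	int i, n = sizeof(madt) / sizeof(madt[0]);

	for (i = 0; i < n; i++)		/* pass 1: enabled processors first */
		if (madt[i].flags & MADT_ENABLED)
			set_processor_mask(madt[i].core_id, 1);

	for (i = 0; i < n; i++)		/* pass 2: disabled processors afterwards */
		if (!(madt[i].flags & MADT_ENABLED))
			set_processor_mask(madt[i].core_id, 2);

	printf("%d present, %d disabled\n", num_processors, disabled_cpus);
	return 0;
}
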
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index a5fc61f8b348..e5a39bbad078 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -51,11 +51,18 @@ static u64 paravt_steal_clock(int cpu)
}
#ifdef CONFIG_SMP
+static struct smp_ops native_ops;
+
static void pv_send_ipi_single(int cpu, unsigned int action)
{
int min, old;
irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ native_ops.send_ipi_single(cpu, action);
+ return;
+ }
+
old = atomic_fetch_or(BIT(action), &info->message);
if (old)
return;
@@ -75,6 +82,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
if (cpumask_empty(mask))
return;
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ native_ops.send_ipi_mask(mask, action);
+ return;
+ }
+
action = BIT(action);
for_each_cpu(i, mask) {
info = &per_cpu(irq_stat, i);
@@ -147,6 +159,8 @@ static void pv_init_ipi(void)
{
int r, swi;
+ /* Init native ipi irq for ACTION_BOOT_CPU */
+ native_ops.init_ipi();
swi = get_percpu_irq(INT_SWI0);
if (swi < 0)
panic("SWI0 IRQ mapping failed\n");
@@ -193,6 +207,7 @@ int __init pv_ipi_init(void)
return 0;
#ifdef CONFIG_SMP
+ native_ops = mp_ops;
mp_ops.init_ipi = pv_init_ipi;
mp_ops.send_ipi_single = pv_send_ipi_single;
mp_ops.send_ipi_mask = pv_send_ipi_mask;
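
Note on the paravirt.c change: the native smp_ops are copied into native_ops before the pv_* hooks are installed, so ACTION_BOOT_CPU IPIs can still be delivered through the hardware path while everything else uses the paravirtualized path. Below is a standalone sketch of that save-then-delegate pattern; the ACTION_BOOT_CPU value and the printf bodies are placeholders, not the kernel's:

#include <stdio.h>

#define ACTION_BOOT_CPU 0		/* placeholder value for illustration only */

struct smp_ops { void (*send_ipi_single)(int cpu, unsigned int action); };

static void native_send_ipi_single(int cpu, unsigned int action)
{
	printf("native IPI   -> cpu %d, action %u\n", cpu, action);
}

static struct smp_ops mp_ops = { .send_ipi_single = native_send_ipi_single };
static struct smp_ops native_ops;	/* saved copy of the hardware path */

static void pv_send_ipi_single(int cpu, unsigned int action)
{
	if (action == ACTION_BOOT_CPU) {	/* booting still needs the real IPI */
		native_ops.send_ipi_single(cpu, action);
		return;
	}
	printf("paravirt IPI -> cpu %d, action %u\n", cpu, action);
}

int main(void)
{
	native_ops = mp_ops;			/* save before overriding the hooks */
	mp_ops.send_ipi_single = pv_send_ipi_single;

	mp_ops.send_ipi_single(1, ACTION_BOOT_CPU);	/* falls back to native */
	mp_ops.send_ipi_single(1, 3);			/* takes the paravirt path */
	return 0;
}
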
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index f2ff8b5d591e..6e58f65455c7 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -293,13 +293,15 @@ unsigned long stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
- /* Space for the VDSO & data page */
- top -= PAGE_ALIGN(current->thread.vdso->size);
- top -= VVAR_SIZE;
-
- /* Space to randomize the VDSO base */
- if (current->flags & PF_RANDOMIZE)
- top -= VDSO_RANDOMIZE_SIZE;
+ if (current->thread.vdso) {
+ /* Space for the VDSO & data page */
+ top -= PAGE_ALIGN(current->thread.vdso->size);
+ top -= VVAR_SIZE;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+ }
return top;
}
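
Note on the process.c change: stack_top() only subtracts the vDSO and VVAR reservation when current->thread.vdso is actually set, so callers without a vDSO image no longer dereference a NULL pointer. Below is a standalone sketch of the guarded layout calculation, with illustrative TASK_SIZE and VVAR_SIZE values:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE     (1UL << 47)	/* illustrative, not LoongArch's real value */
#define VVAR_SIZE     PAGE_SIZE		/* illustrative */

struct vdso_image { unsigned long size; };

/* Reserve room below TASK_SIZE only if a vDSO image is installed;
 * a task without one just gets the raw, page-masked top. */
static unsigned long stack_top(const struct vdso_image *vdso)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (vdso) {
		top -= PAGE_ALIGN(vdso->size);
		top -= VVAR_SIZE;
	}
	return top;
}

int main(void)
{
	struct vdso_image img = { .size = 2 * PAGE_SIZE };

	printf("with vdso:    %#lx\n", stack_top(&img));
	printf("without vdso: %#lx\n", stack_top(NULL));
	return 0;
}
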
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 00e307203ddb..cbd3c09a93c1 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -55,6 +55,7 @@
#define SMBIOS_FREQHIGH_OFFSET 0x17
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
#define LOONGSON_EFI_ENABLE (1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2;
@@ -125,7 +126,7 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
- loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 9afc2d8b3414..5d59e9ce2772 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -302,7 +302,7 @@ static void __init fdt_smp_setup(void)
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
- early_numa_add_cpu(cpu, 0);
+ early_numa_add_cpu(cpuid, 0);
set_cpuid_to_node(cpuid, 0);
}
@@ -331,11 +331,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus)
int i = 0;
parse_acpi_topology();
+ cpu_data[0].global_id = cpu_logical_map(0);
for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
set_cpu_present(i, true);
csr_mail_send(0, __cpu_logical_map[i], 0);
- cpu_data[i].global_id = __cpu_logical_map[i];
}
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -380,6 +380,7 @@ void loongson_init_secondary(void)
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+ cpu_data[cpu].global_id = cpu_logical_map(cpu);
}
void loongson_smp_finish(void)
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index f9f4eb00c92e..c57b4134f3e8 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -555,6 +555,9 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
#else
unsigned int *pc;
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
/*
@@ -579,6 +582,8 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
#endif
irqentry_exit(regs, state);
}
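
Note on the traps.c change: do_ale() re-enables local interrupts while emulating the unaligned access if the interrupted context had them enabled (PRMD.PIE set), and disables them again on the way out. Below is a standalone sketch of that bracket, with stub local_irq_*() helpers and a stand-in PIE bit in place of the kernel's:

#include <stdio.h>
#include <stdbool.h>

#define CSR_PRMD_PIE 0x4		/* stand-in for the PRMD interrupt-enable bit */

struct pt_regs { unsigned long csr_prmd; };

static bool irqs_on;
static void local_irq_enable(void)  { irqs_on = true;  }
static void local_irq_disable(void) { irqs_on = false; }

/* Run the emulation with IRQs in the same state as the interrupted context:
 * enabled only if PRMD.PIE was set, restored before returning. */
static void do_ale(struct pt_regs *regs)
{
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	printf("emulate unaligned access, irqs %s\n", irqs_on ? "on" : "off");

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();
}

int main(void)
{
	struct pt_regs from_user   = { .csr_prmd = CSR_PRMD_PIE };
	struct pt_regs from_irqoff = { .csr_prmd = 0 };

	do_ale(&from_user);
	do_ale(&from_irqoff);
	return 0;
}
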
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index f6fcc52aefae..2c0d852ca536 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -34,7 +34,6 @@ static union {
struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;
-static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data;
@@ -85,10 +84,8 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct loongarch_vdso_info vdso_info = {
.vdso = vdso_start,
- .size = PAGE_SIZE,
.code_mapping = {
.name = "[vdso]",
- .pages = vdso_pages,
.mremap = vdso_mremap,
},
.data_mapping = {
@@ -103,11 +100,14 @@ static int __init init_vdso(void)
unsigned long i, cpu, pfn;
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
- BUG_ON(!PAGE_ALIGNED(vdso_info.size));
for_each_possible_cpu(cpu)
vdso_pdata[cpu].node = cpu_to_node(cpu);
+ vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
+ vdso_info.code_mapping.pages =
+ kcalloc(vdso_info.size / PAGE_SIZE, sizeof(struct page *), GFP_KERNEL);
+
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
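
Note on the vdso.c change: the image size is no longer assumed to be a single page; it is computed from the vdso_start/vdso_end linker symbols at init time, and the code_mapping.pages array is allocated with kcalloc() to match. Below is a standalone sketch of the sizing arithmetic only; the addresses and the 4 KiB PAGE_SIZE are illustrative, and the per-page pfn fill-in is kernel-only:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SIZE     4096UL		/* illustrative; LoongArch typically uses larger pages */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Pretend addresses of the linker-provided vdso_start/vdso_end symbols. */
	uintptr_t vdso_start = 0x10000, vdso_end = 0x10000 + 3 * PAGE_SIZE + 100;

	unsigned long size = PAGE_ALIGN((unsigned long)(vdso_end - vdso_start));
	void **pages = calloc(size / PAGE_SIZE, sizeof(*pages));
	if (!pages)
		return 1;

	/* One slot per page of the image; the kernel fills these from the
	 * physical start pfn, here we only show the sizing arithmetic. */
	printf("vdso image: %lu bytes -> %lu page pointers\n", size, size / PAGE_SIZE);

	free(pages);
	return 0;
}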