Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                      14
-rw-r--r--   lib/Kconfig.debug                24
-rw-r--r--   lib/Makefile                      2
-rw-r--r--   lib/asn1_encoder.c                2
-rw-r--r--   lib/buildid.c                    74
-rw-r--r--   lib/devres.c                      2
-rw-r--r--   lib/dump_stack.c                 13
-rw-r--r--   lib/dynamic_debug.c               2
-rw-r--r--   lib/fonts/font_pearl_8x8.c        2
-rw-r--r--   lib/kfifo.c                       2
-rw-r--r--   lib/list_sort.c                   2
-rw-r--r--   lib/logic_iomem.c               318
-rw-r--r--   lib/nlattr.c                      4
-rw-r--r--   lib/oid_registry.c                2
-rw-r--r--   lib/pldmfw/pldmfw.c               2
-rw-r--r--   lib/reed_solomon/test_rslib.c     2
-rw-r--r--   lib/refcount.c                    2
-rw-r--r--   lib/rhashtable.c                  2
-rw-r--r--   lib/sbitmap.c                     2
-rw-r--r--   lib/scatterlist.c                 4
-rw-r--r--   lib/seq_buf.c                     2
-rw-r--r--   lib/sort.c                        2
-rw-r--r--   lib/stackdepot.c                  2
-rw-r--r--   lib/test_bitops.c                 2
-rw-r--r--   lib/test_bpf.c                    2
-rw-r--r--   lib/test_kasan.c                  2
-rw-r--r--   lib/test_kmod.c                   6
-rw-r--r--   lib/test_scanf.c                  2
-rw-r--r--   lib/vsprintf.c                   10
29 files changed, 453 insertions, 54 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index ac3b30697b2b..d241fe476fda 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -102,6 +102,20 @@ config INDIRECT_PIO
When in doubt, say N.
+config INDIRECT_IOMEM
+ bool
+ help
+ This is selected by other options/architectures to provide the
+ emulated iomem accessors.
+
+config INDIRECT_IOMEM_FALLBACK
+ bool
+ depends on INDIRECT_IOMEM
+ help
+ If INDIRECT_IOMEM is selected, this enables falling back to plain
+ mmio accesses when the IO memory address is not a registered
+ emulated region.
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8acc01d7d816..831212722924 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -35,6 +35,17 @@ config PRINTK_CALLER
no option to enable/disable at the kernel command line parameter or
sysfs interface.
+config STACKTRACE_BUILD_ID
+ bool "Show build ID information in stacktraces"
+ depends on PRINTK
+ help
+ Selecting this option adds build ID information for symbols in
+ stacktraces printed with the printk format '%p[SR]b'.
+
+ This option is intended for distros where debuginfo is not easily
+ accessible but can be downloaded given the build ID of the vmlinux or
+ kernel module where the function is located.
+
config CONSOLE_LOGLEVEL_DEFAULT
int "Default console loglevel (1-15)"
range 1 15
@@ -313,9 +324,6 @@ config DEBUG_INFO_BTF
config PAHOLE_HAS_SPLIT_BTF
def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
-config PAHOLE_HAS_ZEROSIZE_PERCPU_SUPPORT
- def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "122")
-
config DEBUG_INFO_BTF_MODULES
def_bool y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
@@ -403,8 +411,8 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
-config DEBUG_FORCE_FUNCTION_ALIGN_32B
- bool "Force all function address 32B aligned" if EXPERT
+config DEBUG_FORCE_FUNCTION_ALIGN_64B
+ bool "Force all function address 64B aligned" if EXPERT
help
There are cases that a commit from one domain changes the function
address alignment of other domains, and cause magic performance
@@ -1282,7 +1290,7 @@ config PROVE_RAW_LOCK_NESTING
option expect lockdep splats until these problems have been fully
addressed which is work in progress. This config switch allows to
identify and analyze these problems. It will be removed and the
- check permanentely enabled once the main issues have been fixed.
+ check permanently enabled once the main issues have been fixed.
If unsure, select N.
@@ -1448,7 +1456,7 @@ config DEBUG_LOCKING_API_SELFTESTS
Say Y here if you want the kernel to run a short self-test during
bootup. The self-test checks whether common types of locking bugs
are detected by debugging mechanisms or not. (if you disable
- lock debugging then those bugs wont be detected of course.)
+ lock debugging then those bugs won't be detected of course.)
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
@@ -1928,7 +1936,7 @@ config FAIL_IO_TIMEOUT
thus exercising the error handling.
Only works with drivers that use the generic timeout handling,
- for others it wont do anything.
+ for others it won't do anything.
config FAIL_FUTEX
bool "Fault-injection capability for futexes"
diff --git a/lib/Makefile b/lib/Makefile
index 6d765d5fb8ac..5efd1b435a37 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -148,6 +148,8 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
lib-y += logic_pio.o
+lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
index 41e71aae3ef6..27bbe891714f 100644
--- a/lib/asn1_encoder.c
+++ b/lib/asn1_encoder.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(asn1_encode_oid);
/**
* asn1_encode_length() - encode a length to follow an ASN.1 tag
* @data: pointer to encode at
- * @data_len: pointer to remaning length (adjusted by routine)
+ * @data_len: pointer to remaining length (adjusted by routine)
* @len: length to encode
*
* This routine can encode lengths up to 65535 using the ASN.1 rules.
diff --git a/lib/buildid.c b/lib/buildid.c
index 6156997c3895..dfc62625cae4 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -1,36 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/buildid.h>
+#include <linux/cache.h>
#include <linux/elf.h>
+#include <linux/kernel.h>
#include <linux/pagemap.h>
#define BUILD_ID 3
+
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
-static inline int parse_build_id(void *page_addr,
- unsigned char *build_id,
- __u32 *size,
- void *note_start,
- Elf32_Word note_size)
+static int parse_build_id_buf(unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
{
Elf32_Word note_offs = 0, new_offs;
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
-
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
-
while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
if (nhdr->n_type == BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
+ !strcmp((char *)(nhdr + 1), "GNU") &&
nhdr->n_descsz > 0 &&
nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
memcpy(build_id,
@@ -49,11 +44,29 @@ static inline int parse_build_id(void *page_addr,
break;
note_offs = new_offs;
}
+
return -EINVAL;
}
+static inline int parse_build_id(const void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ return parse_build_id_buf(build_id, size, note_start, note_size);
+}
+
/* Parse build ID from 32-bit ELF */
-static int get_build_id_32(void *page_addr, unsigned char *build_id,
+static int get_build_id_32(const void *page_addr, unsigned char *build_id,
__u32 *size)
{
Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
@@ -78,7 +91,7 @@ static int get_build_id_32(void *page_addr, unsigned char *build_id,
}
/* Parse build ID from 64-bit ELF */
-static int get_build_id_64(void *page_addr, unsigned char *build_id,
+static int get_build_id_64(const void *page_addr, unsigned char *build_id,
__u32 *size)
{
Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
@@ -108,7 +121,7 @@ static int get_build_id_64(void *page_addr, unsigned char *build_id,
* @build_id: buffer to store build id, at least BUILD_ID_SIZE long
* @size: returns actual build id size in case of success
*
- * Returns 0 on success, otherwise error (< 0).
+ * Return: 0 on success, -EINVAL otherwise
*/
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size)
@@ -147,3 +160,32 @@ out:
put_page(page);
return ret;
}
+
+/**
+ * build_id_parse_buf - Get build ID from a buffer
+ * @buf: Elf note section(s) to parse
+ * @buf_size: Size of @buf in bytes
+ * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
+{
+ return parse_build_id_buf(build_id, NULL, buf, buf_size);
+}
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
+
+/**
+ * init_vmlinux_build_id - Compute and stash the running kernel's build ID
+ */
+void __init init_vmlinux_build_id(void)
+{
+ extern const void __start_notes __weak;
+ extern const void __stop_notes __weak;
+ unsigned int size = &__stop_notes - &__start_notes;
+
+ build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
+}
+#endif
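
For illustration, a minimal sketch (not part of this patch) of how a caller could use the new build_id_parse_buf() helper added above. Only build_id_parse_buf() and BUILD_ID_SIZE_MAX come from this change; the notes buffer, its length and the helper name below are assumptions made for the example.

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/buildid.h>

/* Hypothetical caller: parse a GNU build-ID note out of an ELF note blob. */
static void report_build_id(const void *notes, u32 notes_len)
{
	unsigned char id[BUILD_ID_SIZE_MAX];

	if (build_id_parse_buf(notes, id, notes_len) == 0)
		/* %20phN prints the 20-byte build ID as hex, as dump_stack.c does */
		pr_info("GNU build ID: %20phN\n", id);
	else
		pr_info("no GNU build-ID note found\n");
}
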
diff --git a/lib/devres.c b/lib/devres.c
index bdb06898a977..b0e1c6702c71 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -355,7 +355,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* detach.
*
* This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succed once
+ * be safely called without context and guaranteed to succeed once
* allocated.
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 27f16872320d..cd3387bb34e5 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/buildid.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -36,6 +37,14 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
va_end(args);
}
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+#define BUILD_ID_FMT " %20phN"
+#define BUILD_ID_VAL vmlinux_build_id
+#else
+#define BUILD_ID_FMT "%s"
+#define BUILD_ID_VAL ""
+#endif
+
/**
* dump_stack_print_info - print generic debug info for dump_stack()
* @log_lvl: log level
@@ -45,13 +54,13 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(), current->pid, current->comm,
kexec_crash_loaded() ? "Kdump: loaded " : "",
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version, BUILD_ID_VAL);
if (dump_stack_arch_desc_str[0] != '\0')
printk("%sHardware name: %s\n",
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index d3ce78298832..cb5abb42c16a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -991,7 +991,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val,
ddebug_exec_queries((val ? val : "+p"), modname);
- return 0; /* query failure shouldnt stop module load */
+ return 0; /* query failure shouldn't stop module load */
}
/* handle both dyndbg and $module.dyndbg params at boot */
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index b1678ed0017c..ae98ca17982e 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -3,7 +3,7 @@
/* */
/* Font file generated by cpi2fnt */
/* ------------------------------ */
-/* Combined with the alpha-numeric */
+/* Combined with the alphanumeric */
/* portion of Greg Harp's old PEARL */
/* font (from earlier versions of */
/* linux-m86k) by John Shifflett */
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 70dab9ac7827..12f5a347aa13 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -415,7 +415,7 @@ static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
)
/*
- * __kfifo_poke_n internal helper function for storeing the length of
+ * __kfifo_poke_n internal helper function for storing the length of
* the record into the fifo
*/
static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 1e1e37762799..0fb59e92ca2d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -104,7 +104,7 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* @head: the list to sort
* @cmp: the elements comparison function
*
- * The comparison funtion @cmp must return > 0 if @a should sort after
+ * The comparison function @cmp must return > 0 if @a should sort after
* @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
* sort before @b *or* their original order should be preserved. It is
* always called with the element that came first in the input in @a,
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
new file mode 100644
index 000000000000..b76b92dd0f1f
--- /dev/null
+++ b/lib/logic_iomem.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/logic_iomem.h>
+
+struct logic_iomem_region {
+ const struct resource *res;
+ const struct logic_iomem_region_ops *ops;
+ struct list_head list;
+};
+
+struct logic_iomem_area {
+ const struct logic_iomem_ops *ops;
+ void *priv;
+};
+
+#define AREA_SHIFT 24
+#define MAX_AREA_SIZE (1 << AREA_SHIFT)
+#define MAX_AREAS ((1ULL<<32) / MAX_AREA_SIZE)
+#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
+#define AREA_MASK (MAX_AREA_SIZE - 1)
+#ifdef CONFIG_64BIT
+#define IOREMAP_BIAS 0xDEAD000000000000UL
+#define IOREMAP_MASK 0xFFFFFFFF00000000UL
+#else
+#define IOREMAP_BIAS 0
+#define IOREMAP_MASK 0
+#endif
+
+static DEFINE_MUTEX(regions_mtx);
+static LIST_HEAD(regions_list);
+static struct logic_iomem_area mapped_areas[MAX_AREAS];
+
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops)
+{
+ struct logic_iomem_region *rreg;
+ int err;
+
+ if (WARN_ON(!resource || !ops))
+ return -EINVAL;
+
+ if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
+ return -EINVAL;
+
+ rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
+ if (!rreg)
+ return -ENOMEM;
+
+ err = request_resource(&iomem_resource, resource);
+ if (err) {
+ kfree(rreg);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&regions_mtx);
+ rreg->res = resource;
+ rreg->ops = ops;
+ list_add_tail(&rreg->list, &regions_list);
+ mutex_unlock(&regions_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(logic_iomem_add_region);
+
+#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
+{
+ WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
+ (unsigned long long)offset, size);
+ return NULL;
+}
+
+static void real_iounmap(void __iomem *addr)
+{
+ WARN(1, "invalid iounmap for addr 0x%llx\n",
+ (unsigned long long)addr);
+}
+#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+
+void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ void __iomem *ret = NULL;
+ struct logic_iomem_region *rreg, *found = NULL;
+ int i;
+
+ mutex_lock(&regions_mtx);
+ list_for_each_entry(rreg, &regions_list, list) {
+ if (rreg->res->start > offset)
+ continue;
+ if (rreg->res->end < offset + size - 1)
+ continue;
+ found = rreg;
+ break;
+ }
+
+ if (!found)
+ goto out;
+
+ for (i = 0; i < MAX_AREAS; i++) {
+ long offs;
+
+ if (mapped_areas[i].ops)
+ continue;
+
+ offs = rreg->ops->map(offset - found->res->start,
+ size, &mapped_areas[i].ops,
+ &mapped_areas[i].priv);
+ if (offs < 0) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ if (WARN_ON(!mapped_areas[i].ops)) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
+ break;
+ }
+out:
+ mutex_unlock(&regions_mtx);
+ if (ret)
+ return ret;
+ return real_ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+static inline struct logic_iomem_area *
+get_area(const volatile void __iomem *addr)
+{
+ unsigned long a = (unsigned long)addr;
+ unsigned int idx;
+
+ if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
+ return NULL;
+
+ idx = (a & AREA_BITS) >> AREA_SHIFT;
+
+ if (mapped_areas[idx].ops)
+ return &mapped_areas[idx];
+
+ return NULL;
+}
+
+void iounmap(void __iomem *addr)
+{
+ struct logic_iomem_area *area = get_area(addr);
+
+ if (!area) {
+ real_iounmap(addr);
+ return;
+ }
+
+ if (area->ops->unmap)
+ area->ops->unmap(area->priv);
+
+ mutex_lock(&regions_mtx);
+ area->ops = NULL;
+ area->priv = NULL;
+ mutex_unlock(&regions_mtx);
+}
+EXPORT_SYMBOL(iounmap);
+
+#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
+#define MAKE_FALLBACK(op, sz) \
+static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid read" #op " at address %llx\n", \
+ (unsigned long long)addr); \
+ return (u ## sz)~0ULL; \
+} \
+ \
+void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
+ (unsigned long long)val, (unsigned long long)addr); \
+} \
+
+MAKE_FALLBACK(b, 8);
+MAKE_FALLBACK(w, 16);
+MAKE_FALLBACK(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_FALLBACK(q, 64);
+#endif
+
+static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ WARN(1, "Invalid memset_io at address 0x%llx\n",
+ (unsigned long long)addr);
+}
+
+static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
+ (unsigned long long)addr);
+
+ memset(buffer, 0xff, size);
+}
+
+static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
+ (unsigned long long)addr);
+}
+#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
+
+#define MAKE_OP(op, sz) \
+u##sz __raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) \
+ return real_raw_read ## op(addr); \
+ \
+ return (u ## sz) area->ops->read(area->priv, \
+ (unsigned long)addr & AREA_MASK,\
+ sz / 8); \
+} \
+EXPORT_SYMBOL(__raw_read ## op); \
+ \
+void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) { \
+ real_raw_write ## op(val, addr); \
+ return; \
+ } \
+ \
+ area->ops->write(area->priv, \
+ (unsigned long)addr & AREA_MASK, \
+ sz / 8, val); \
+} \
+EXPORT_SYMBOL(__raw_write ## op)
+
+MAKE_OP(b, 8);
+MAKE_OP(w, 16);
+MAKE_OP(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_OP(q, 64);
+#endif
+
+void memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memset_io(addr, value, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->set) {
+ area->ops->set(area->priv, start, value, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, value);
+}
+EXPORT_SYMBOL(memset_io);
+
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_fromio(buffer, addr, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_from) {
+ area->ops->copy_from(area->priv, buffer, start, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ buf[offs] = area->ops->read(area->priv, start + offs, 1);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ const u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_toio(addr, buffer, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_to) {
+ area->ops->copy_to(area->priv, start, buffer, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, buf[offs]);
+}
+EXPORT_SYMBOL(memcpy_toio);
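
As a rough sketch (not part of this patch) of how an architecture or driver might hook a region into the emulated-iomem layer above: logic_iomem_add_region(), struct logic_iomem_region_ops and struct logic_iomem_ops are introduced by this series, but their exact callback prototypes live in include/linux/logic_iomem.h, which is not shown here, so the signatures below are assumptions inferred from the call sites in logic_iomem.c; the device model itself is hypothetical.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/logic_iomem.h>

/* Hypothetical device model backing the emulated MMIO window. */
static unsigned long fake_read(void *priv, unsigned long addr, int size)
{
	return 0;	/* real register emulation would go here */
}

static void fake_write(void *priv, unsigned long addr, int size,
		       unsigned long val)
{
	/* real register emulation would go here */
}

static const struct logic_iomem_ops fake_ops = {
	.read	= fake_read,
	.write	= fake_write,
};

/* Called when someone ioremap()s part of the registered region. */
static long fake_map(unsigned long offset, size_t size,
		     const struct logic_iomem_ops **ops, void **priv)
{
	*ops = &fake_ops;
	*priv = NULL;
	return 0;	/* offset of the mapping inside the new area */
}

static const struct logic_iomem_region_ops fake_region_ops = {
	.map	= fake_map,
};

static struct resource fake_res = {
	.name	= "fake-mmio",
	.start	= 0x10000000,
	.end	= 0x10000fff,
	.flags	= IORESOURCE_MEM,
};

static int __init fake_region_init(void)
{
	return logic_iomem_add_region(&fake_res, &fake_region_ops);
}
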
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 1d051ef66afe..86029ad5ead4 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -619,7 +619,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation depends on the validate flags passed, see
* &enum netlink_validation for more details on that.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
@@ -633,7 +633,7 @@ int __nla_validate(const struct nlattr *head, int len, int maxtype,
EXPORT_SYMBOL(__nla_validate);
/**
- * nla_policy_len - Determin the max. length of a policy
+ * nla_policy_len - Determine the max. length of a policy
* @policy: policy to use
* @n: number of policies
*
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 3dfaa836e7c5..e592d48b1974 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(parse_OID);
* @bufsize: The size of the buffer
*
* The OID is rendered into the buffer in "a.b.c.d" format and the number of
- * bytes is returned. -EBADMSG is returned if the data could not be intepreted
+ * bytes is returned. -EBADMSG is returned if the data could not be interpreted
* and -ENOBUFS if the buffer was too small.
*/
int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index e5d4b3b2af81..6e77eb6d8e72 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -82,7 +82,7 @@ pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)
* @bytes_to_move: number of bytes to move the offset forward by
*
* Check that there is enough space past the current offset, and then move the
- * offset forward by this ammount.
+ * offset forward by this amount.
*
* Returns: zero on success, or -EFAULT if the image is too small to fit the
* expected length.
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index 4eb29f365ece..d9d1c33aebda 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -385,7 +385,7 @@ static void test_bc(struct rs_control *rs, int len, int errs,
/*
* We check that the returned word is actually a
- * codeword. The obious way to do this would be to
+ * codeword. The obvious way to do this would be to
* compute the syndrome, but we don't want to replicate
* that code here. However, all the codes are in
* systematic form, and therefore we can encode the
diff --git a/lib/refcount.c b/lib/refcount.c
index ebac8b7d15a7..a207a8f22b3c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
* @flags: saved IRQ-flags if the is acquired
*
* Same as refcount_dec_and_lock() above except that the spinlock is acquired
- * with disabled interupts.
+ * with disabled interrupts.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c949c1e3b87c..e12bbfb240b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -703,7 +703,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
*
* Returns zero if successful.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
*
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 47b3691058eb..b25db9be938a 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -583,7 +583,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
/*
* Once the clear bit is set, the bit may be allocated out.
*
- * Orders READ/WRITE on the asssociated instance(such as request
+ * Orders READ/WRITE on the associated instance(such as request
* of blk_mq) by this bit for avoiding race with re-allocation,
* and its pair is the memory barrier implied in __sbitmap_get_word.
*
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a59778946404..27efa6178153 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(sg_next);
* @sg: The scatterlist
*
* Description:
- * Allows to know how many entries are in sg, taking into acount
+ * Allows to know how many entries are in sg, taking into account
* chaining as well
*
**/
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(sg_nents);
*
* Description:
* Determines the number of entries in sg that are required to meet
- * the supplied length, taking into acount chaining as well
+ * the supplied length, taking into account chaining as well
*
* Returns:
* the number of sg entries needed, negative error on failure
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 6dafde851333..0a68f7aa85d6 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -289,7 +289,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
}
/**
- * seq_buf_to_user - copy the squence buffer to user space
+ * seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
* @cnt: The amount to copy
diff --git a/lib/sort.c b/lib/sort.c
index 3ad454411997..aa18153864d2 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -51,7 +51,7 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
* which basically all CPUs have, to minimize loop overhead computations.
*
* For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
- * bottom of the loop, even though the zero flag is stil valid from the
+ * bottom of the loop, even though the zero flag is still valid from the
* subtract (since the intervening mov instructions don't alter the flags).
* Gcc 8.1.0 doesn't have that problem.
*/
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index df9179f4f441..0a2e417f83cb 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -11,7 +11,7 @@
* Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
* and free stacks repeat a lot, we save about 100x space.
* Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguos memory allocation.
+ * another in a contiguous memory allocation.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index 471141ddd691..3b7bcbee84db 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -15,7 +15,7 @@
* get_count_order/long
*/
-/* use an enum because thats the most common BITMAP usage */
+/* use an enum because that's the most common BITMAP usage */
enum bitops_fun {
BITOPS_4 = 4,
BITOPS_7 = 7,
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 4dc4dcbecd12..d500320778c7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1095,7 +1095,7 @@ static struct bpf_test tests[] = {
{
"RET_A",
.u.insns = {
- /* check that unitialized X and A contain zeros */
+ /* check that uninitialized X and A contain zeros */
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index f5f54766cbc2..8f7b0b2f6e11 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -651,7 +651,7 @@ static void kasan_global_oob(struct kunit *test)
{
/*
* Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
- * from failing here and panicing the kernel, access the array via a
+ * from failing here and panicking the kernel, access the array via a
* volatile pointer, which will prevent the compiler from being able to
* determine the array bounds.
*
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 38c250fbace3..ce1589391413 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -286,7 +286,7 @@ static int tally_work_test(struct kmod_test_device_info *info)
* If this ran it means *all* tasks were created fine and we
* are now just collecting results.
*
- * Only propagate errors, do not override with a subsequent sucess case.
+ * Only propagate errors, do not override with a subsequent success case.
*/
static void tally_up_work(struct kmod_test_device *test_dev)
{
@@ -543,7 +543,7 @@ static int trigger_config_run(struct kmod_test_device *test_dev)
* wrong with the setup of the test. If the test setup went fine
* then userspace must just check the result of config->test_result.
* One issue with relying on the return from a call in the kernel
- * is if the kernel returns a possitive value using this trigger
+ * is if the kernel returns a positive value using this trigger
* will not return the value to userspace, it would be lost.
*
* By not relying on capturing the return value of tests we are using
@@ -585,7 +585,7 @@ trigger_config_store(struct device *dev,
* Note: any return > 0 will be treated as success
* and the error value will not be available to userspace.
* Do not rely on trying to send to userspace a test value
- * return value as possitive return errors will be lost.
+ * return value as positive return errors will be lost.
*/
if (WARN_ON(ret > 0))
return -EINVAL;
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index 48ff5747a4da..84fe09eaf55e 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -600,7 +600,7 @@ static void __init numbers_prefix_overflow(void)
/*
* 0x prefix in a field of width 2 using %i conversion: first field
* converts to 0. Next field scan starts at the character after "0x",
- * which will convert if can be intepreted as decimal but will fail
+ * which will convert if can be interpreted as decimal but will fail
* if it contains any hex digits (since no 0x prefix).
*/
test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2926cc27623f..26c83943748a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -993,8 +993,12 @@ char *symbol_string(char *buf, char *end, void *ptr,
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
- if (*fmt == 'B')
+ if (*fmt == 'B' && fmt[1] == 'b')
+ sprint_backtrace_build_id(sym, value);
+ else if (*fmt == 'B')
sprint_backtrace(sym, value);
+ else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
+ sprint_symbol_build_id(sym, value);
else if (*fmt != 's')
sprint_symbol(sym, value);
else
@@ -2263,9 +2267,11 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
* - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - 'S[R]b' as above with module build ID (for use in backtraces)
* - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
* %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
+ * - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
@@ -3417,7 +3423,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
while (*fmt) {
/* skip any white space in format */
- /* white space in format matchs any amount of
+ /* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
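
Finally, a hypothetical illustration (not part of this patch) of the new build-ID format extensions documented earlier in this vsprintf.c diff. Only the '%pSb', '%pSRb' and '%pBb' specifiers are introduced by the change; the helper and the address passed in are made up for the example.

#include <linux/printk.h>

static void show_addr_with_build_id(void *addr)
{
	/* symbol+offset, plus the build ID of the module (or vmlinux) */
	pr_info("symbol:    %pSb\n", addr);
	/* same, after __builtin_extract_return_addr() translation */
	pr_info("ret addr:  %pSRb\n", addr);
	/* backtrace variant, as used by stack dumpers */
	pr_info("backtrace: %pBb\n", addr);
}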