Diffstat (limited to 'drivers')
240 files changed, 2392 insertions, 1632 deletions
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c index 601fdbe70179..61472a381904 100644 --- a/drivers/accel/habanalabs/common/memory.c +++ b/drivers/accel/habanalabs/common/memory.c @@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf) struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv; struct hl_ctx *ctx; - if (!hl_dmabuf) - return; - ctx = hl_dmabuf->ctx; if (hl_dmabuf->memhash_hnode) @@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx, { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct hl_device *hdev = ctx->hdev; - int rc, fd; + CLASS(get_unused_fd, fd)(flags); + + if (fd < 0) { + dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); + return fd; + } exp_info.ops = &habanalabs_dmabuf_ops; exp_info.size = total_size; @@ -1872,13 +1874,6 @@ static int export_dmabuf(struct hl_ctx *ctx, return PTR_ERR(hl_dmabuf->dmabuf); } - fd = dma_buf_fd(hl_dmabuf->dmabuf, flags); - if (fd < 0) { - dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); - rc = fd; - goto err_dma_buf_put; - } - hl_dmabuf->ctx = ctx; hl_ctx_get(hl_dmabuf->ctx); atomic_inc(&ctx->hdev->dmabuf_export_cnt); @@ -1890,13 +1885,9 @@ static int export_dmabuf(struct hl_ctx *ctx, get_file(ctx->hpriv->file_priv->filp); *dmabuf_fd = fd; + fd_install(take_fd(fd), hl_dmabuf->dmabuf->file); return 0; - -err_dma_buf_put: - hl_dmabuf->dmabuf->priv = NULL; - dma_buf_put(hl_dmabuf->dmabuf); - return rc; } static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset) diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c index a38b88baadf2..5722e4128d3c 100644 --- a/drivers/accel/habanalabs/gaudi2/gaudi2.c +++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c @@ -10437,7 +10437,7 @@ end: (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64); WREG32(sob_addr, 0); - kfree(lin_dma_pkts_arr); + kvfree(lin_dma_pkts_arr); return rc; } diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index bf8dc92a373a..2561b045acc7 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -315,7 +315,7 @@ static void __iomem *einj_get_parameter_address(void) memcpy_fromio(&v5param, p, v5param_size); acpi5 = 1; check_vendor_extension(pa_v5, &v5param); - if (available_error_type & ACPI65_EINJV2_SUPP) { + if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) { len = v5param.einjv2_struct.length; offset = offsetof(struct einjv2_extension_struct, component_arr); max_nr_components = (len - offset) / @@ -540,6 +540,9 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, struct set_error_type_with_address *v5param; v5param = kmalloc(v5param_size, GFP_KERNEL); + if (!v5param) + return -ENOMEM; + memcpy_fromio(v5param, einj_param, v5param_size); v5param->type = type; if (type & ACPI5_VENDOR_BIT) { @@ -1091,7 +1094,7 @@ err_put_table: return rc; } -static void __exit einj_remove(struct faux_device *fdev) +static void einj_remove(struct faux_device *fdev) { struct apei_exec_context ctx; @@ -1114,15 +1117,9 @@ static void __exit einj_remove(struct faux_device *fdev) } static struct faux_device *einj_dev; -/* - * einj_remove() lives in .exit.text. For drivers registered via - * platform_driver_probe() this is ok because they cannot get unbound at - * runtime. So mark the driver struct with __refdata to prevent modpost - * triggering a section mismatch warning. 
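/*
 * The habanalabs export_dmabuf() hunk earlier in this patch replaces the
 * dma_buf_fd() error path with the scope-based fd guard from <linux/file.h>:
 * the fd is reserved up front and the dma-buf file installed at the end.
 * A minimal sketch of that idiom, not part of the patch; example_* names
 * are illustrative and the caller is assumed to transfer a file reference:
 */
#include <linux/cleanup.h>
#include <linux/file.h>

static int example_install_fd(struct file *filp, unsigned int flags)
{
	/* Reserves an fd; put_unused_fd() runs automatically on early return. */
	CLASS(get_unused_fd, fd)(flags);

	if (fd < 0)
		return fd;	/* nothing to undo, the guard owns no fd */

	/* ... any failure between here and fd_install() releases the fd ... */

	/* take_fd() disarms the guard once the fd is published to userspace. */
	fd_install(take_fd(fd), filp);
	return 0;
}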
- */ -static struct faux_device_ops einj_device_ops __refdata = { +static struct faux_device_ops einj_device_ops = { .probe = einj_probe, - .remove = __exit_p(einj_remove), + .remove = einj_remove, }; static int __init einj_init(void) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 75c7db8b156a..7855bbf752b1 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void) goto out; } - if (!strstarts(ecdt_ptr->id, "\\")) { + if (!strlen(ecdt_ptr->id)) { /* * The ECDT table on some MSI notebooks contains invalid data, together * with an empty ID string (""). @@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void) * a "fully qualified reference to the (...) embedded controller device", * so this string always has to start with a backslash. * - * By verifying this we can avoid such faulty ECDT tables in a safe way. + * However some ThinkBook machines have a ECDT table with a valid EC + * description but an invalid ID string ("_SB.PC00.LPCB.EC0"). + * + * Because of this we only check if the ID string is empty in order to + * avoid the obvious cases. */ - pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id); + pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n"); goto out; } diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c index 318683744ed1..11b1c2828005 100644 --- a/drivers/acpi/pfr_update.c +++ b/drivers/acpi/pfr_update.c @@ -329,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap, if (type == PFRU_CODE_INJECT_TYPE) return payload_hdr->rt_ver >= cap->code_rt_version; - return payload_hdr->rt_ver >= cap->drv_rt_version; + return payload_hdr->svn_ver >= cap->drv_svn; } static void print_update_debug_info(struct pfru_updated_result *result, diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 755003bf3a45..8972446b7162 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) struct acpi_processor *pr = per_cpu(processors, cpu); int ret; - if (!pr || !pr->performance) + if (!pr) continue; /* @@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, ret); + if (!pr->performance) + continue; + ret = acpi_processor_get_platform_limit(pr); if (ret) pr_err("Failed to update freq constraint for CPU%d (%d)\n", diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 2946ae6d4b2c..2586e77ebf45 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2075,7 +2075,7 @@ out: * Check if a link is established. This is a relaxed version of * ata_phys_link_online() which accounts for the fact that this is potentially * called after changing the link power management policy, which may not be - * reflected immediately in the SSTAUS register (e.g., we may still be seeing + * reflected immediately in the SStatus register (e.g., we may still be seeing * the PHY in partial, slumber or devsleep Partial power management state. * So check that: * - A device is still present, that is, DET is 1h (Device presence detected @@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link) u32 sstatus; u8 det, ipm; + /* + * For old IDE/PATA adapters that do not have a valid scr_read method, + * or if reading the SStatus register fails, assume that the device is + * present. 
Device probe will determine if that is really the case. + */ if (sata_scr_read(link, SCR_STATUS, &sstatus)) - return false; + return true; det = sstatus & 0x0f; ipm = (sstatus >> 8) & 0x0f; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 57f674f51b0c..2ded5e476d6e 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, /* Check cdl_ctrl */ switch (buf[0] & 0x03) { case 0: - /* Disable CDL if it is enabled */ - if (!(dev->flags & ATA_DFLAG_CDL_ENABLED)) - return 0; + /* Disable CDL */ ata_dev_dbg(dev, "Disabling CDL\n"); cdl_action = 0; dev->flags &= ~ATA_DFLAG_CDL_ENABLED; break; case 0x02: /* - * Enable CDL if not already enabled. Since this is mutually - * exclusive with NCQ priority, allow this only if NCQ priority - * is disabled. + * Enable CDL. Since CDL is mutually exclusive with NCQ + * priority, allow this only if NCQ priority is disabled. */ - if (dev->flags & ATA_DFLAG_CDL_ENABLED) - return 0; if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { ata_dev_err(dev, "NCQ priority must be disabled to enable CDL\n"); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index e21492981f7d..f6d6276974ee 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -380,6 +380,9 @@ enum { /* this is/was a write request */ __EE_WRITE, + /* hand back using mempool_free(e, drbd_buffer_page_pool) */ + __EE_RELEASE_TO_MEMPOOL, + /* this is/was a write same request */ __EE_WRITE_SAME, @@ -402,6 +405,7 @@ enum { #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) #define EE_SUBMITTED (1<<__EE_SUBMITTED) #define EE_WRITE (1<<__EE_WRITE) +#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL) #define EE_WRITE_SAME (1<<__EE_WRITE_SAME) #define EE_APPLICATION (1<<__EE_APPLICATION) #define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ) @@ -858,7 +862,6 @@ struct drbd_device { struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */ struct list_head done_ee; /* need to send P_WRITE_ACK */ struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */ - struct list_head net_ee; /* zero-copy network send in progress */ struct list_head resync_reads; atomic_t pp_in_use; /* allocated from page pool */ @@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ extern mempool_t drbd_request_mempool; extern mempool_t drbd_ee_mempool; -/* drbd's page pool, used to buffer data received from the peer, - * or data requested by the peer. - * - * This does not have an emergency reserve. - * - * When allocating from this pool, it first takes pages from the pool. - * Only if the pool is depleted will try to allocate from the system. - * - * The assumption is that pages taken from this pool will be processed, - * and given back, "quickly", and then can be recycled, so we can avoid - * frequent calls to alloc_page(), and still will be able to make progress even - * under memory pressure. - */ -extern struct page *drbd_pp_pool; -extern spinlock_t drbd_pp_lock; -extern int drbd_pp_vacant; -extern wait_queue_head_t drbd_pp_wait; - /* We also need a standard (emergency-reserve backed) page pool * for meta data IO (activity log, bitmap). * We can keep it global, as long as it is used as "N pages at a time". 
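/*
 * The drbd hunks in this patch replace the hand-rolled drbd_pp_pool page
 * list with a standard page mempool (drbd_buffer_page_pool), and tag peer
 * requests with EE_RELEASE_TO_MEMPOOL while mempool_is_saturated() reports
 * the reserve is not full, so reserve pages flow back via mempool_free().
 * A minimal sketch of the mempool API as used here; the size and
 * example_* names are illustrative:
 */
#include <linux/gfp.h>
#include <linux/mempool.h>

static mempool_t example_page_pool;

static int example_pool_init(int min_pages)
{
	/* Pre-allocate min_pages order-0 pages as an always-available reserve. */
	return mempool_init_page_pool(&example_page_pool, min_pages, 0);
}

static struct page *example_page_get(void)
{
	/*
	 * Tries the page allocator first, then the reserve. Without
	 * __GFP_DIRECT_RECLAIM it returns NULL instead of sleeping when
	 * both are exhausted.
	 */
	return mempool_alloc(&example_page_pool, __GFP_HIGHMEM | __GFP_NOWARN);
}

static void example_page_put(struct page *page)
{
	/* Refills the reserve if it is below min_pages, else frees the page. */
	mempool_free(page, &example_page_pool);
}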
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait; */ #define DRBD_MIN_POOL_PAGES 128 extern mempool_t drbd_md_io_page_pool; +extern mempool_t drbd_buffer_page_pool; /* We also need to make sure we get a bio * when we need it for housekeeping purposes */ @@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, sector_t, unsigned int, unsigned int, gfp_t) __must_hold(local); -extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, - int); -#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) -#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) +extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req); extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool); extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); extern int drbd_connected(struct drbd_peer_device *); @@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page) for (; page && ({ n = page_chain_next(page); 1; }); page = n) -static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req) -{ - struct page *page = peer_req->pages; - page_chain_for_each(page) { - if (page_count(page) > 1) - return 1; - } - return 0; -} - static inline union drbd_state drbd_read_state(struct drbd_device *device) { struct drbd_resource *resource = device->resource; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 52724b79be30..c73376886e7a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ mempool_t drbd_request_mempool; mempool_t drbd_ee_mempool; mempool_t drbd_md_io_page_pool; +mempool_t drbd_buffer_page_pool; struct bio_set drbd_md_io_bio_set; struct bio_set drbd_io_bio_set; -/* I do not use a standard mempool, because: - 1) I want to hand out the pre-allocated objects first. - 2) I want to be able to interrupt sleeping allocation with a signal. - Note: This is a single linked list, the next pointer is the private - member of struct page. - */ -struct page *drbd_pp_pool; -DEFINE_SPINLOCK(drbd_pp_lock); -int drbd_pp_vacant; -wait_queue_head_t drbd_pp_wait; - DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); static const struct block_device_operations drbd_ops = { @@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, struct drbd_peer_request *peer_req) { + bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL); struct page *page = peer_req->pages; unsigned len = peer_req->i.size; int err; @@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, page_chain_for_each(page) { unsigned l = min_t(unsigned, len, PAGE_SIZE); - err = _drbd_send_page(peer_device, page, 0, l, - page_chain_next(page) ? MSG_MORE : 0); + if (likely(use_sendpage)) + err = _drbd_send_page(peer_device, page, 0, l, + page_chain_next(page) ? MSG_MORE : 0); + else + err = _drbd_no_send_page(peer_device, page, 0, l, + page_chain_next(page) ? 
MSG_MORE : 0); + if (err) return err; len -= l; @@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device) INIT_LIST_HEAD(&device->sync_ee); INIT_LIST_HEAD(&device->done_ee); INIT_LIST_HEAD(&device->read_ee); - INIT_LIST_HEAD(&device->net_ee); INIT_LIST_HEAD(&device->resync_reads); INIT_LIST_HEAD(&device->resync_work.list); INIT_LIST_HEAD(&device->unplug_work.list); @@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device) D_ASSERT(device, list_empty(&device->sync_ee)); D_ASSERT(device, list_empty(&device->done_ee)); D_ASSERT(device, list_empty(&device->read_ee)); - D_ASSERT(device, list_empty(&device->net_ee)); D_ASSERT(device, list_empty(&device->resync_reads)); D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); D_ASSERT(device, list_empty(&device->resync_work.list)); @@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device) static void drbd_destroy_mempools(void) { - struct page *page; - - while (drbd_pp_pool) { - page = drbd_pp_pool; - drbd_pp_pool = (struct page *)page_private(page); - __free_page(page); - drbd_pp_vacant--; - } - /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */ bioset_exit(&drbd_io_bio_set); bioset_exit(&drbd_md_io_bio_set); + mempool_exit(&drbd_buffer_page_pool); mempool_exit(&drbd_md_io_page_pool); mempool_exit(&drbd_ee_mempool); mempool_exit(&drbd_request_mempool); @@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void) static int drbd_create_mempools(void) { - struct page *page; const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count; - int i, ret; + int ret; /* caches */ drbd_request_cache = kmem_cache_create( @@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void) if (ret) goto Enomem; + ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0); + if (ret) + goto Enomem; + ret = mempool_init_slab_pool(&drbd_request_mempool, number, drbd_request_cache); if (ret) @@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void) if (ret) goto Enomem; - for (i = 0; i < number; i++) { - page = alloc_page(GFP_HIGHUSER); - if (!page) - goto Enomem; - set_page_private(page, (unsigned long)drbd_pp_pool); - drbd_pp_pool = page; - } - drbd_pp_vacant = number; - return 0; Enomem: @@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device) rr = drbd_free_peer_reqs(device, &device->done_ee); if (rr) drbd_err(device, "%d EEs in done list found!\n", rr); - - rr = drbd_free_peer_reqs(device, &device->net_ee); - if (rr) - drbd_err(device, "%d EEs in net list found!\n", rr); } /* caution. no locking. */ @@ -2863,11 +2839,6 @@ static int __init drbd_init(void) return err; } - /* - * allocate all necessary structs - */ - init_waitqueue_head(&drbd_pp_wait); - drbd_proc = NULL; /* play safe for drbd_cleanup */ idr_init(&drbd_devices); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 975024cf03c5..caaf2781136d 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -33,6 +33,7 @@ #include <linux/string.h> #include <linux/scatterlist.h> #include <linux/part_stat.h> +#include <linux/mempool.h> #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" @@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int); #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) -/* - * some helper functions to deal with single linked page lists, - * page->private being our "next" pointer. - */ - -/* If at least n pages are linked at head, get n pages off. 
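/*
 * The helpers being removed above, and the new __drbd_alloc_pages() below,
 * string pages into a single-linked chain by storing the "next" pointer in
 * page->private. A minimal sketch of that idiom with illustrative names:
 */
#include <linux/mm.h>

static void example_chain_push(struct page **head, struct page *page)
{
	set_page_private(page, (unsigned long)*head);	/* page->private = next */
	*head = page;
}

static struct page *example_chain_pop(struct page **head)
{
	struct page *page = *head;

	if (page) {
		*head = (struct page *)page_private(page);
		set_page_private(page, 0);	/* detach before handing out */
	}
	return page;
}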
- * Otherwise, don't modify head, and return NULL. - * Locking is the responsibility of the caller. - */ -static struct page *page_chain_del(struct page **head, int n) -{ - struct page *page; - struct page *tmp; - - BUG_ON(!n); - BUG_ON(!head); - - page = *head; - - if (!page) - return NULL; - - while (page) { - tmp = page_chain_next(page); - if (--n == 0) - break; /* found sufficient pages */ - if (tmp == NULL) - /* insufficient pages, don't use any of them. */ - return NULL; - page = tmp; - } - - /* add end of list marker for the returned list */ - set_page_private(page, 0); - /* actual return value, and adjustment of head */ - page = *head; - *head = tmp; - return page; -} - -/* may be used outside of locks to find the tail of a (usually short) - * "private" page chain, before adding it back to a global chain head - * with page_chain_add() under a spinlock. */ -static struct page *page_chain_tail(struct page *page, int *len) -{ - struct page *tmp; - int i = 1; - while ((tmp = page_chain_next(page))) { - ++i; - page = tmp; - } - if (len) - *len = i; - return page; -} - -static int page_chain_free(struct page *page) -{ - struct page *tmp; - int i = 0; - page_chain_for_each_safe(page, tmp) { - put_page(page); - ++i; - } - return i; -} - -static void page_chain_add(struct page **head, - struct page *chain_first, struct page *chain_last) -{ -#if 1 - struct page *tmp; - tmp = page_chain_tail(chain_first, NULL); - BUG_ON(tmp != chain_last); -#endif - - /* add chain to head */ - set_page_private(chain_last, (unsigned long)*head); - *head = chain_first; -} - -static struct page *__drbd_alloc_pages(struct drbd_device *device, - unsigned int number) +static struct page *__drbd_alloc_pages(unsigned int number) { struct page *page = NULL; struct page *tmp = NULL; unsigned int i = 0; - /* Yes, testing drbd_pp_vacant outside the lock is racy. - * So what. It saves a spin_lock. */ - if (drbd_pp_vacant >= number) { - spin_lock(&drbd_pp_lock); - page = page_chain_del(&drbd_pp_pool, number); - if (page) - drbd_pp_vacant -= number; - spin_unlock(&drbd_pp_lock); - if (page) - return page; - } - /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ for (i = 0; i < number; i++) { - tmp = alloc_page(GFP_TRY); + tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY); if (!tmp) - break; + goto fail; set_page_private(tmp, (unsigned long)page); page = tmp; } - - if (i == number) - return page; - - /* Not enough pages immediately available this time. - * No need to jump around here, drbd_alloc_pages will retry this - * function "soon". */ - if (page) { - tmp = page_chain_tail(page, NULL); - spin_lock(&drbd_pp_lock); - page_chain_add(&drbd_pp_pool, page, tmp); - drbd_pp_vacant += i; - spin_unlock(&drbd_pp_lock); + return page; +fail: + page_chain_for_each_safe(page, tmp) { + set_page_private(page, 0); + mempool_free(page, &drbd_buffer_page_pool); } return NULL; } -static void reclaim_finished_net_peer_reqs(struct drbd_device *device, - struct list_head *to_be_freed) -{ - struct drbd_peer_request *peer_req, *tmp; - - /* The EEs are always appended to the end of the list. Since - they are sent in order over the wire, they have to finish - in order. As soon as we see the first not finished we can - stop to examine the list... 
*/ - - list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) { - if (drbd_peer_req_has_active_page(peer_req)) - break; - list_move(&peer_req->w.list, to_be_freed); - } -} - -static void drbd_reclaim_net_peer_reqs(struct drbd_device *device) -{ - LIST_HEAD(reclaimed); - struct drbd_peer_request *peer_req, *t; - - spin_lock_irq(&device->resource->req_lock); - reclaim_finished_net_peer_reqs(device, &reclaimed); - spin_unlock_irq(&device->resource->req_lock); - list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) - drbd_free_net_peer_req(device, peer_req); -} - -static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection) -{ - struct drbd_peer_device *peer_device; - int vnr; - - rcu_read_lock(); - idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { - struct drbd_device *device = peer_device->device; - if (!atomic_read(&device->pp_in_use_by_net)) - continue; - - kref_get(&device->kref); - rcu_read_unlock(); - drbd_reclaim_net_peer_reqs(device); - kref_put(&device->kref, drbd_destroy_device); - rcu_read_lock(); - } - rcu_read_unlock(); -} - /** * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled) * @peer_device: DRBD device. @@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int bool retry) { struct drbd_device *device = peer_device->device; - struct page *page = NULL; + struct page *page; struct net_conf *nc; - DEFINE_WAIT(wait); unsigned int mxb; rcu_read_lock(); @@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int mxb = nc ? nc->max_buffers : 1000000; rcu_read_unlock(); - if (atomic_read(&device->pp_in_use) < mxb) - page = __drbd_alloc_pages(device, number); - - /* Try to keep the fast path fast, but occasionally we need - * to reclaim the pages we lended to the network stack. */ - if (page && atomic_read(&device->pp_in_use_by_net) > 512) - drbd_reclaim_net_peer_reqs(device); - - while (page == NULL) { - prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE); - - drbd_reclaim_net_peer_reqs(device); - - if (atomic_read(&device->pp_in_use) < mxb) { - page = __drbd_alloc_pages(device, number); - if (page) - break; - } - - if (!retry) - break; - - if (signal_pending(current)) { - drbd_warn(device, "drbd_alloc_pages interrupted!\n"); - break; - } - - if (schedule_timeout(HZ/10) == 0) - mxb = UINT_MAX; - } - finish_wait(&drbd_pp_wait, &wait); + if (atomic_read(&device->pp_in_use) >= mxb) + schedule_timeout_interruptible(HZ / 10); + page = __drbd_alloc_pages(number); if (page) atomic_add(number, &device->pp_in_use); @@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int * Is also used from inside an other spin_lock_irq(&resource->req_lock); * Either links the page chain back to the global pool, * or returns all pages to the system. */ -static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net) +static void drbd_free_pages(struct drbd_device *device, struct page *page) { - atomic_t *a = is_net ? 
&device->pp_in_use_by_net : &device->pp_in_use; - int i; + struct page *tmp; + int i = 0; if (page == NULL) return; - if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count) - i = page_chain_free(page); - else { - struct page *tmp; - tmp = page_chain_tail(page, &i); - spin_lock(&drbd_pp_lock); - page_chain_add(&drbd_pp_pool, page, tmp); - drbd_pp_vacant += i; - spin_unlock(&drbd_pp_lock); - } - i = atomic_sub_return(i, a); + page_chain_for_each_safe(page, tmp) { + set_page_private(page, 0); + if (page_count(page) == 1) + mempool_free(page, &drbd_buffer_page_pool); + else + put_page(page); + i++; + } + i = atomic_sub_return(i, &device->pp_in_use); if (i < 0) - drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n", - is_net ? "pp_in_use_by_net" : "pp_in_use", i); - wake_up(&drbd_pp_wait); + drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i); } /* @@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto gfpflags_allow_blocking(gfp_mask)); if (!page) goto fail; + if (!mempool_is_saturated(&drbd_buffer_page_pool)) + peer_req->flags |= EE_RELEASE_TO_MEMPOOL; } memset(peer_req, 0, sizeof(*peer_req)); @@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto return NULL; } -void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, - int is_net) +void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req) { might_sleep(); if (peer_req->flags & EE_HAS_DIGEST) kfree(peer_req->digest); - drbd_free_pages(device, peer_req->pages, is_net); + drbd_free_pages(device, peer_req->pages); D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); D_ASSERT(device, drbd_interval_empty(&peer_req->i)); if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) { @@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) LIST_HEAD(work_list); struct drbd_peer_request *peer_req, *t; int count = 0; - int is_net = list == &device->net_ee; spin_lock_irq(&device->resource->req_lock); list_splice_init(list, &work_list); spin_unlock_irq(&device->resource->req_lock); list_for_each_entry_safe(peer_req, t, &work_list, w.list) { - __drbd_free_peer_req(device, peer_req, is_net); + drbd_free_peer_req(device, peer_req); count++; } return count; @@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) static int drbd_finish_peer_reqs(struct drbd_device *device) { LIST_HEAD(work_list); - LIST_HEAD(reclaimed); struct drbd_peer_request *peer_req, *t; int err = 0; spin_lock_irq(&device->resource->req_lock); - reclaim_finished_net_peer_reqs(device, &reclaimed); list_splice_init(&device->done_ee, &work_list); spin_unlock_irq(&device->resource->req_lock); - list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) - drbd_free_net_peer_req(device, peer_req); - /* possible callbacks here: * e_end_block, and e_end_resync_block, e_send_superseded. * all ignore the last argument. @@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size) data_size -= len; } kunmap(page); - drbd_free_pages(peer_device->device, page, 0); + drbd_free_pages(peer_device->device, page); return err; } @@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device) put_ldev(device); } - /* tcp_close and release of sendpage pages can be deferred. 
I don't - * want to use SO_LINGER, because apparently it can be deferred for - * more than 20 seconds (longest time I checked). - * - * Actually we don't care for exactly when the network stack does its - * put_page(), but release our reference on these pages right here. - */ - i = drbd_free_peer_reqs(device, &device->net_ee); - if (i) - drbd_info(device, "net_ee not empty, killed %u entries\n", i); i = atomic_read(&device->pp_in_use_by_net); if (i) drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); @@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi) while (get_t_state(thi) == RUNNING) { drbd_thread_current_set_cpu(thi); - conn_reclaim_net_peer_reqs(connection); - if (test_and_clear_bit(SEND_PING, &connection->flags)) { if (drbd_send_ping(connection)) { drbd_err(connection, "drbd_send_ping has failed\n"); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index a6ea737b3b71..dea3e79d044f 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1030,22 +1030,6 @@ out: return 1; } -/* helper */ -static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) -{ - if (drbd_peer_req_has_active_page(peer_req)) { - /* This might happen if sendpage() has not finished */ - int i = PFN_UP(peer_req->i.size); - atomic_add(i, &device->pp_in_use_by_net); - atomic_sub(i, &device->pp_in_use); - spin_lock_irq(&device->resource->req_lock); - list_add_tail(&peer_req->w.list, &device->net_ee); - spin_unlock_irq(&device->resource->req_lock); - wake_up(&drbd_pp_wait); - } else - drbd_free_peer_req(device, peer_req); -} - /** * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST * @w: work object. @@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel) int err; if (unlikely(cancel)) { - drbd_free_peer_req(device, peer_req); - dec_unacked(device); - return 0; + err = 0; + goto out; } if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { @@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel) err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req); } - dec_unacked(device); - - move_to_net_ee_or_free(device, peer_req); - if (unlikely(err)) drbd_err(device, "drbd_send_block() failed\n"); +out: + dec_unacked(device); + drbd_free_peer_req(device, peer_req); + return err; } @@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel) int err; if (unlikely(cancel)) { - drbd_free_peer_req(device, peer_req); - dec_unacked(device); - return 0; + err = 0; + goto out; } if (get_ldev_if_state(device, D_FAILED)) { @@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel) /* update resync data with failure */ drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size); } - - dec_unacked(device); - - move_to_net_ee_or_free(device, peer_req); - if (unlikely(err)) drbd_err(device, "drbd_send_block() failed\n"); +out: + dec_unacked(device); + drbd_free_peer_req(device, peer_req); + return err; } @@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) int err, eq = 0; if (unlikely(cancel)) { - drbd_free_peer_req(device, peer_req); - dec_unacked(device); - return 0; + err = 0; + goto out; } if (get_ldev(device)) { @@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) if (drbd_ratelimit()) drbd_err(device, "Sending NegDReply. 
I guess it gets messy.\n"); } - - dec_unacked(device); - move_to_net_ee_or_free(device, peer_req); - if (unlikely(err)) drbd_err(device, "drbd_send_block/ack() failed\n"); +out: + dec_unacked(device); + drbd_free_peer_req(device, peer_req); + return err; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1b6ee91f8eb9..57263c273f0f 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -137,20 +137,29 @@ static void loop_global_unlock(struct loop_device *lo, bool global) static int max_part; static int part_shift; -static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) +static loff_t lo_calculate_size(struct loop_device *lo, struct file *file) { + struct kstat stat; loff_t loopsize; + int ret; - /* Compute loopsize in bytes */ - loopsize = i_size_read(file->f_mapping->host); - if (offset > 0) - loopsize -= offset; + /* + * Get the accurate file size. This provides better results than + * cached inode data, particularly for network filesystems where + * metadata may be stale. + */ + ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0); + if (ret) + return 0; + + loopsize = stat.size; + if (lo->lo_offset > 0) + loopsize -= lo->lo_offset; /* offset is beyond i_size, weird but possible */ if (loopsize < 0) return 0; - - if (sizelimit > 0 && sizelimit < loopsize) - loopsize = sizelimit; + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) + loopsize = lo->lo_sizelimit; /* * Unfortunately, if we want to do I/O on the device, * the number of 512-byte sectors has to fit into a sector_t. @@ -158,11 +167,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) return loopsize >> 9; } -static loff_t get_loop_size(struct loop_device *lo, struct file *file) -{ - return get_size(lo->lo_offset, lo->lo_sizelimit, file); -} - /* * We support direct I/O only if lo_offset is aligned with the logical I/O size * of backing device, and the logical block size of loop is bigger than that of @@ -569,7 +573,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, error = -EINVAL; /* size of the new backing store needs to be the same */ - if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) + if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file)) goto out_err; /* @@ -1063,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, loop_update_dio(lo); loop_sysfs_init(lo); - size = get_loop_size(lo, file); + size = lo_calculate_size(lo, file); loop_set_size(lo, size); /* Order wrt reading lo_state in loop_validate_file(). 
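/*
 * lo_calculate_size() above queries the backing file with
 * vfs_getattr_nosec(..., STATX_SIZE, ...) instead of i_size_read(), so
 * network filesystems can return a revalidated size rather than stale
 * cached inode metadata. Minimal sketch of the query; example_* is
 * illustrative:
 */
#include <linux/fs.h>
#include <linux/stat.h>

static loff_t example_backing_size(struct file *file)
{
	struct kstat stat;

	/* May trigger a metadata round trip on network filesystems. */
	if (vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0))
		return 0;	/* treat failure as zero size, as the patch does */

	return stat.size;
}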
*/ @@ -1255,8 +1259,7 @@ out_unfreeze: if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); if (!err && size_changed) { - loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, - lo->lo_backing_file); + loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file); loop_set_size(lo, new_size); } out_unlock: @@ -1399,7 +1402,7 @@ static int loop_set_capacity(struct loop_device *lo) if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; - size = get_loop_size(lo, lo->lo_backing_file); + size = lo_calculate_size(lo, lo->lo_backing_file); loop_set_size(lo, size); return 0; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 6561d2a561fa..99abd67b708b 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -235,7 +235,7 @@ struct ublk_device { struct completion completion; unsigned int nr_queues_ready; - unsigned int nr_privileged_daemon; + bool unprivileged_daemons; struct mutex cancel_mutex; bool canceling; pid_t ublksrv_tgid; @@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, { blk_status_t res; - if (unlikely(ubq->fail_io)) + if (unlikely(READ_ONCE(ubq->fail_io))) return BLK_STS_TARGET; /* With recovery feature enabled, force_abort is set in @@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, * Note: force_abort is guaranteed to be seen because it is set * before request queue is unqiuesced. */ - if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort)) + if (ublk_nosrv_should_queue_io(ubq) && + unlikely(READ_ONCE(ubq->force_abort))) return BLK_STS_IOERR; if (check_cancel && unlikely(ubq->canceling)) @@ -1550,7 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub) /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */ ub->mm = NULL; ub->nr_queues_ready = 0; - ub->nr_privileged_daemon = 0; + ub->unprivileged_daemons = false; ub->ublksrv_tgid = -1; } @@ -1644,7 +1645,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp) * Transition the device to the nosrv state. 
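/*
 * The ublk hunks around here drop the blk_mq_quiesce_queue() bracket and
 * instead mark the lockless fail_io/force_abort flag accesses with
 * READ_ONCE()/WRITE_ONCE(), which keeps the compiler from tearing, fusing
 * or caching the loads and stores. Generic sketch with illustrative names:
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct example_queue {
	bool fail_io;	/* written by the control path, read locklessly in I/O */
};

static void example_mark_failing(struct example_queue *q)
{
	WRITE_ONCE(q->fail_io, true);	/* single untorn store */
}

static bool example_should_fail(struct example_queue *q)
{
	return READ_ONCE(q->fail_io);	/* fresh load on every call */
}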
What exactly this * means depends on the recovery flags */ - blk_mq_quiesce_queue(disk->queue); if (ublk_nosrv_should_stop_dev(ub)) { /* * Allow any pending/future I/O to pass through quickly @@ -1652,8 +1652,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp) * waits for all pending I/O to complete */ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) - ublk_get_queue(ub, i)->force_abort = true; - blk_mq_unquiesce_queue(disk->queue); + WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true); ublk_stop_dev_unlocked(ub); } else { @@ -1663,9 +1662,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp) } else { ub->dev_info.state = UBLK_S_DEV_FAIL_IO; for (i = 0; i < ub->dev_info.nr_hw_queues; i++) - ublk_get_queue(ub, i)->fail_io = true; + WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true); } - blk_mq_unquiesce_queue(disk->queue); } unlock: mutex_unlock(&ub->mutex); @@ -1980,12 +1978,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) __must_hold(&ub->mutex) { ubq->nr_io_ready++; - if (ublk_queue_ready(ubq)) { + if (ublk_queue_ready(ubq)) ub->nr_queues_ready++; - - if (capable(CAP_SYS_ADMIN)) - ub->nr_privileged_daemon++; - } + if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN)) + ub->unprivileged_daemons = true; if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) { /* now we are ready for handling ublk io request */ @@ -2880,8 +2876,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, ublk_apply_params(ub); - /* don't probe partitions if any one ubq daemon is un-trusted */ - if (ub->nr_privileged_daemon != ub->nr_queues_ready) + /* don't probe partitions if any daemon task is un-trusted */ + if (ub->unprivileged_daemons) set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); ublk_get_device(ub); diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c index 4390fd571dbd..a8c520dc09e1 100644 --- a/drivers/bluetooth/btmtk.c +++ b/drivers/bluetooth/btmtk.c @@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev, * WMT command. 
*/ err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT, - TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); - if (err == -EINTR) { - bt_dev_err(hdev, "Execution of wmt command interrupted"); - clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); - goto err_free_wc; - } + TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT); if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 73a4a325c867..76e7f857fb7d 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -543,10 +543,10 @@ static int ps_setup(struct hci_dev *hdev) } if (psdata->wakeup_source) { - ret = devm_request_irq(&serdev->dev, psdata->irq_handler, - ps_host_wakeup_irq_handler, - IRQF_ONESHOT | IRQF_TRIGGER_FALLING, - dev_name(&serdev->dev), nxpdev); + ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler, + NULL, ps_host_wakeup_irq_handler, + IRQF_ONESHOT, + dev_name(&serdev->dev), nxpdev); if (ret) bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n"); disable_irq(psdata->irq_handler); diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c index 04b578a0be17..61f1a290ff08 100644 --- a/drivers/cdx/controller/cdx_rpmsg.c +++ b/drivers/cdx/controller/cdx_rpmsg.c @@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev) chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = rpdev->dst; - strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, - strlen(cdx_rpmsg_id_table[0].name)); + strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name)); cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo); if (!cdx_mcdi->ept) { diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index 23b7178522ae..7e2f2b1a1c36 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -1587,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev, memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(unsigned int)); } + } else { + memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) * + sizeof(unsigned int)); } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) @@ -1670,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev, memset(&data[insn->n], 0, (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); } + } else { + memset(data, 0, n_data * sizeof(unsigned int)); } ret = parse_insn(dev, insn, data, file); if (ret < 0) diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index f1dc854928c1..c9ebaadc5e82 100644 --- a/drivers/comedi/drivers.c +++ b/drivers/comedi/drivers.c @@ -620,11 +620,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, unsigned int chan = CR_CHAN(insn->chanspec); unsigned int base_chan = (chan < 32) ? 0 : chan; unsigned int _data[2]; + unsigned int i; int ret; - if (insn->n == 0) - return 0; - memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; @@ -635,18 +633,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; - _data[0] = 1U << (chan - base_chan); /* mask */ - _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */ + _data[0] = 1U << (chan - base_chan); /* mask */ } + for (i = 0; i < insn->n; i++) { + if (insn->insn == INSN_WRITE) + _data[1] = data[i] ? 
_data[0] : 0; /* bits */ - ret = s->insn_bits(dev, s, &_insn, _data); - if (ret < 0) - return ret; + ret = s->insn_bits(dev, s, &_insn, _data); + if (ret < 0) + return ret; - if (insn->insn == INSN_READ) - data[0] = (_data[1] >> (chan - base_chan)) & 1; + if (insn->insn == INSN_READ) + data[i] = (_data[1] >> (chan - base_chan)) & 1; + } - return 1; + return insn->n; } static int __comedi_device_postconfig_async(struct comedi_device *dev, diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c index 0430630e6ebb..b542896fa0e4 100644 --- a/drivers/comedi/drivers/pcl726.c +++ b/drivers/comedi/drivers/pcl726.c @@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev, * Hook up the external trigger source interrupt only if the * user config option is valid and the board supports interrupts. */ - if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) { + if (it->options[1] > 0 && it->options[1] < 16 && + (board->irq_mask & (1U << it->options[1]))) { ret = request_irq(it->options[1], pcl726_interrupt, 0, dev->board_name, dev); if (ret == 0) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 06a1c7dd081f..f366d35c5840 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs), X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs), X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs), + X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs), {} }; #endif diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 52d5d26fc7c6..b2e3d0b0a116 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns) static DEFINE_PER_CPU(struct menu_device, menu_devices); +static void menu_update_intervals(struct menu_device *data, unsigned int interval_us) +{ + /* Update the repeating-pattern data. */ + data->intervals[data->interval_ptr++] = interval_us; + if (data->interval_ptr >= INTERVALS) + data->interval_ptr = 0; +} + static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); /* @@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (data->needs_update) { menu_update(drv, dev); data->needs_update = 0; + } else if (!dev->last_residency_ns) { + /* + * This happens when the driver rejects the previously selected + * idle state and returns an error, so update the recent + * intervals table to prevent invalid information from being + * used going forward. + */ + menu_update_intervals(data, UINT_MAX); } /* Find the shortest expected idle interval. */ @@ -271,20 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, return 0; } - if (tick_nohz_tick_stopped()) { - /* - * If the tick is already stopped, the cost of possible short - * idle duration misprediction is much higher, because the CPU - * may be stuck in a shallow idle state for a long time as a - * result of it. In that case say we might mispredict and use - * the known time till the closest timer event for the idle - * state selection. 
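/*
 * menu_update_intervals() above centralizes the governor's fixed-size ring
 * of recent idle intervals so the rejected-state path can poison it with
 * UINT_MAX. Generic sketch of the ring-buffer idiom; the size and
 * example_* names are illustrative:
 */
#define EXAMPLE_INTERVALS 8

struct example_history {
	unsigned int intervals[EXAMPLE_INTERVALS];
	int interval_ptr;
};

static void example_record(struct example_history *h, unsigned int value)
{
	h->intervals[h->interval_ptr++] = value;	/* overwrite the oldest slot */
	if (h->interval_ptr >= EXAMPLE_INTERVALS)
		h->interval_ptr = 0;			/* wrap around */
}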
- */ - if (predicted_ns < TICK_NSEC) - predicted_ns = data->next_timer_ns; - } else if (latency_req > predicted_ns) { - latency_req = predicted_ns; - } + /* + * If the tick is already stopped, the cost of possible short idle + * duration misprediction is much higher, because the CPU may be stuck + * in a shallow idle state for a long time as a result of it. In that + * case, say we might mispredict and use the known time till the closest + * timer event for the idle state selection. + */ + if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC) + predicted_ns = data->next_timer_ns; /* * Find the idle state with the lowest power while satisfying @@ -300,13 +311,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx == -1) idx = i; /* first enabled state */ + if (s->exit_latency_ns > latency_req) + break; + if (s->target_residency_ns > predicted_ns) { /* * Use a physical idle state, not busy polling, unless * a timer is going to trigger soon enough. */ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && - s->exit_latency_ns <= latency_req && s->target_residency_ns <= data->next_timer_ns) { predicted_ns = s->target_residency_ns; idx = i; @@ -338,8 +351,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, return idx; } - if (s->exit_latency_ns > latency_req) - break; idx = i; } @@ -482,10 +493,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->correction_factor[data->bucket] = new_factor; - /* update the repeating-pattern data */ - data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns); - if (data->interval_ptr >= INTERVALS) - data->interval_ptr = 0; + menu_update_intervals(data, ktime_to_us(measured_ns)); } /** diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index d28477d84697..1d1c2d8f85ae 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region = { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; #endif /* 0 */ +static void complete_address_handler(struct kref *kref) +{ + struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref); + + complete(&handler->done); +} + +static void get_address_handler(struct fw_address_handler *handler) +{ + kref_get(&handler->kref); +} + +static int put_address_handler(struct fw_address_handler *handler) +{ + return kref_put(&handler->kref, complete_address_handler); +} + /** * fw_core_add_address_handler() - register for incoming requests * @handler: callback @@ -596,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, if (other != NULL) { handler->offset += other->length; } else { + init_completion(&handler->done); + kref_init(&handler->kref); list_add_tail_rcu(&handler->link, &address_handler_list); ret = 0; break; @@ -621,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler) list_del_rcu(&handler->link); synchronize_rcu(); + + if (!put_address_handler(handler)) + wait_for_completion(&handler->done); } EXPORT_SYMBOL(fw_core_remove_address_handler); @@ -914,22 +936,31 @@ static void handle_exclusive_region_request(struct fw_card *card, handler = lookup_enclosing_address_handler(&address_handler_list, offset, request->length); if (handler) - handler->address_callback(card, request, tcode, destination, source, - p->generation, offset, request->data, - request->length, handler->callback_data); + 
get_address_handler(handler); } - if (!handler) + if (!handler) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); + return; + } + + // Outside the RCU read-side critical section. Without spinlock. With reference count. + handler->address_callback(card, request, tcode, destination, source, p->generation, offset, + request->data, request->length, handler->callback_data); + put_address_handler(handler); } +// To use kmalloc allocator efficiently, this should be power of two. +#define BUFFER_ON_KERNEL_STACK_SIZE 4 + static void handle_fcp_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { - struct fw_address_handler *handler; - int tcode, destination, source; + struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE]; + struct fw_address_handler *handler, **handlers; + int tcode, destination, source, i, count, buffer_size; if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || @@ -950,15 +981,55 @@ static void handle_fcp_region_request(struct fw_card *card, return; } + count = 0; + handlers = buffer_on_kernel_stack; + buffer_size = ARRAY_SIZE(buffer_on_kernel_stack); scoped_guard(rcu) { list_for_each_entry_rcu(handler, &address_handler_list, link) { - if (is_enclosing_handler(handler, offset, request->length)) - handler->address_callback(card, request, tcode, destination, source, - p->generation, offset, request->data, - request->length, handler->callback_data); + if (is_enclosing_handler(handler, offset, request->length)) { + if (count >= buffer_size) { + int next_size = buffer_size * 2; + struct fw_address_handler **buffer_on_kernel_heap; + + if (handlers == buffer_on_kernel_stack) + buffer_on_kernel_heap = NULL; + else + buffer_on_kernel_heap = handlers; + + buffer_on_kernel_heap = + krealloc_array(buffer_on_kernel_heap, next_size, + sizeof(*buffer_on_kernel_heap), GFP_ATOMIC); + // FCP is used for purposes unrelated to significant system + // resources (e.g. storage or networking), so allocation + // failures are not considered so critical. 
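/*
 * The firewire hunks here pin each address handler with a kref while its
 * callback runs outside the RCU read-side section, and
 * fw_core_remove_address_handler() waits on a completion fired by the
 * final kref_put(). Generic sketch of that teardown pattern; example_*
 * names are illustrative:
 */
#include <linux/completion.h>
#include <linux/kref.h>

struct example_handler {
	struct kref kref;		/* kref_init() at registration */
	struct completion done;		/* init_completion() at registration */
};

static void example_release(struct kref *kref)
{
	struct example_handler *h = container_of(kref, struct example_handler, kref);

	complete(&h->done);	/* last reference gone: unblock the remover */
}

static void example_invoke(struct example_handler *h)
{
	kref_get(&h->kref);	/* pin across the callback */
	/* ... run the handler callback here ... */
	kref_put(&h->kref, example_release);
}

static void example_remove(struct example_handler *h)
{
	/* Drop the registration reference; wait out any in-flight callback. */
	if (!kref_put(&h->kref, example_release))
		wait_for_completion(&h->done);
}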
+ if (!buffer_on_kernel_heap) + break; + + if (handlers == buffer_on_kernel_stack) { + memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack, + sizeof(buffer_on_kernel_stack)); + } + + handlers = buffer_on_kernel_heap; + buffer_size = next_size; + } + get_address_handler(handler); + handlers[count++] = handler; + } } } + for (i = 0; i < count; ++i) { + handler = handlers[i]; + handler->address_callback(card, request, tcode, destination, source, + p->generation, offset, request->data, + request->length, handler->callback_data); + put_address_handler(handler); + } + + if (handlers != buffer_on_kernel_stack) + kfree(handlers); + fw_send_response(card, request, RCODE_COMPLETE); } diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 0be0d569589d..b7629a0e4813 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt) } } - priv->dma_nelms = - dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); - if (priv->dma_nelms == 0) { + err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0); + if (err) { dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n"); - return -ENOMEM; + return err; } + priv->dma_nelms = sgt->nents; /* enable clock */ err = clk_enable(priv->clk); diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c index 9875e34bde72..ed29b07d16c1 100644 --- a/drivers/gpio/gpio-mlxbf3.c +++ b/drivers/gpio/gpio-mlxbf3.c @@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) struct mlxbf3_gpio_context *gs; struct gpio_irq_chip *girq; struct gpio_chip *gc; - char *colon_ptr; int ret, irq; - long num; gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); if (!gs) @@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) gc->owner = THIS_MODULE; gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges; - colon_ptr = strchr(dev_name(dev), ':'); - if (!colon_ptr) { - dev_err(dev, "invalid device name format\n"); - return -EINVAL; - } - - ret = kstrtol(++colon_ptr, 16, &num); - if (ret) { - dev_err(dev, "invalid device instance\n"); - return ret; - } - - if (!num) { - irq = platform_get_irq(pdev, 0); - if (irq >= 0) { - girq = &gs->gc.irq; - gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); - girq->default_type = IRQ_TYPE_NONE; - /* This will let us handle the parent IRQ in the driver */ - girq->num_parents = 0; - girq->parents = NULL; - girq->parent_handler = NULL; - girq->handler = handle_bad_irq; - - /* - * Directly request the irq here instead of passing - * a flow-handler because the irq is shared. - */ - ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, - IRQF_SHARED, dev_name(dev), gs); - if (ret) - return dev_err_probe(dev, ret, "failed to request IRQ"); - } + irq = platform_get_irq_optional(pdev, 0); + if (irq >= 0) { + girq = &gs->gc.irq; + gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); + girq->default_type = IRQ_TYPE_NONE; + /* This will let us handle the parent IRQ in the driver */ + girq->num_parents = 0; + girq->parents = NULL; + girq->parent_handler = NULL; + girq->handler = handle_bad_irq; + + /* + * Directly request the irq here instead of passing + * a flow-handler because the irq is shared. 
+ */ + ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, + IRQF_SHARED, dev_name(dev), gs); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ"); } platform_set_drvdata(pdev, gs); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index a2adaacf6adb..d3f220be2ef9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) } } + if (!amdgpu_vm_ready(vm)) + return -EINVAL; + r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 02138aa55793..dfb6cfd83760 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, } r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | + AMDGPU_VM_PAGE_EXECUTABLE); if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ff98c87b2e0b..5743ebb2f1b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -514,7 +514,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, return false; if (drm_gem_is_imported(obj)) { - struct dma_buf *dma_buf = obj->dma_buf; + struct dma_buf *dma_buf = obj->import_attach->dmabuf; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* No XGMI with non AMD GPUs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 6626a6e64ff5..d1ccbfcf21fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, */ if (!vm->is_compute_context || !vm->process_info) return 0; - if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf)) + if (!drm_gem_is_imported(obj) || + !dma_buf_is_dynamic(obj->import_attach->dmabuf)) return 0; mutex_lock_nested(&vm->process_info->lock, 1); if (!WARN_ON(!vm->process_info->eviction_fence)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0bd51a04be79..23484317a5fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1039,15 +1039,28 @@ int psp_update_fw_reservation(struct psp_context *psp) { int ret; uint64_t reserv_addr, reserv_addr_ext; - uint32_t reserv_size, reserv_size_ext; + uint32_t reserv_size, reserv_size_ext, mp0_ip_ver; struct amdgpu_device *adev = psp->adev; + mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0); + if (amdgpu_sriov_vf(psp->adev)) return 0; - if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) && - (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3))) + switch (mp0_ip_ver) { + case IP_VERSION(14, 0, 2): + if (adev->psp.sos.fw_version < 0x3b0e0d) + return 0; + break; + + case IP_VERSION(14, 0, 3): + if (adev->psp.sos.fw_version < 0x3a0e14) + return 0; + break; + + default: return 0; + } ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5cacf5717016..c39bb06ebda1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if VM is not evicting. + * True if VM is not evicting and all VM entities are not stopped */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - bool empty; bool ret; amdgpu_vm_eviction_lock(vm); @@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) amdgpu_vm_eviction_unlock(vm); spin_lock(&vm->status_lock); - empty = list_empty(&vm->evicted); + ret &= list_empty(&vm->evicted); spin_unlock(&vm->status_lock); - return ret && empty; + spin_lock(&vm->immediate.lock); + ret &= !vm->immediate.stopped; + spin_unlock(&vm->immediate.lock); + + spin_lock(&vm->delayed.lock); + ret &= !vm->delayed.stopped; + spin_unlock(&vm->delayed.lock); + + return ret; } /** @@ -1276,7 +1283,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct drm_gem_object *obj = &bo->tbo.base; if (drm_gem_is_imported(obj) && bo_va->is_xgmi) { - struct dma_buf *dma_buf = obj->dma_buf; + struct dma_buf *dma_buf = obj->import_attach->dmabuf; struct drm_gem_object *gobj = dma_buf->priv; struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 07c936e90d8e..78f9e86ccc09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, list_for_each_entry(block, &vres->blocks, link) vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - amdgpu_vram_mgr_do_reserve(man); - drm_buddy_free_list(mm, &vres->blocks, vres->flags); + amdgpu_vram_mgr_do_reserve(man); mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index cd0e2976e268..a0ca3b2c6bd8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7792,6 +7792,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; + if (WARN_ON(unlikely(!old_con_state || !new_con_state))) + return -EINVAL; + trace_amdgpu_dm_connector_atomic_check(new_con_state); if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 010172f930ae..45feb404b097 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); if (enable) { + struct dc *dc = adev->dm.dc; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) || + pr->config.replay_supported; + + /* + * IPS & self-refresh feature can cause vblank counter resets between + * vblank disable and 
enable. + * It may cause system stuck due to waiting for the vblank counter. + * Call this function to estimate missed vblanks by using timestamps and + * update the vblank counter in DRM. + */ + if (dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && + sr_supported && vblank->config.disable_immediate) + drm_crtc_vblank_restore(crtc); + /* vblank irq on -> Only need vupdate irq in vrr mode */ if (amdgpu_dm_crtc_vrr_active(acrtc_state)) rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 67f08495b7e6..154fd2c18e88 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id( return object_id; } - if (tbl->ucNumberOfObjects <= i) { - dm_error("Can't find connector id %d in connector table of size %d.\n", - i, tbl->ucNumberOfObjects); + if (tbl->ucNumberOfObjects <= i) return object_id; - } id = le16_to_cpu(tbl->asObjects[i].usObjectID); object_id = object_id_from_bios_object_id(id); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 2bcae0643e61..58e88778da7f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3( allocation.sPCLKInput.usFbDiv = cpu_to_le16((uint16_t)bp_params->feedback_divider); allocation.sPCLKInput.ucFracFbDiv = - (uint8_t)bp_params->fractional_feedback_divider; + (uint8_t)(bp_params->fractional_feedback_divider / 100000); allocation.sPCLKInput.ucPostDiv = (uint8_t)bp_params->pixel_clock_post_divider; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index f5ad0a177038..dbd6ef1b60a0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = { /* ClocksStateLow */ { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, /* ClocksStateNominal */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 }, /* ClocksStatePerformance */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } }; int dentist_get_divider_from_did(int did) { @@ -391,8 +391,6 @@ static void dce_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -405,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; 
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index f8409453434c..13cf415e38e5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -120,9 +120,15 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) { + struct dc *dc = context->clk_mgr->ctx->dc; int j; int num_cfgs = 0; + pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; + pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator; + for (j = 0; j < context->stream_count; j++) { int k; @@ -164,6 +170,23 @@ void dce110_fill_display_configs( cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; + + /* Find first CRTC index and calculate its line time. + * This is necessary for DPM on SI GPUs. + */ + if (cfg->pipe_idx < pp_display_cfg->crtc_index) { + const struct dc_crtc_timing *timing = + &context->streams[0]->timing; + + pp_display_cfg->crtc_index = cfg->pipe_idx; + pp_display_cfg->line_time_in_us = + timing->h_total * 10000 / timing->pix_clk_100hz; + } + } + + if (!num_cfgs) { + pp_display_cfg->crtc_index = 0; + pp_display_cfg->line_time_in_us = 0; } pp_display_cfg->display_count = num_cfgs; @@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; - pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /* TODO: dce11.2*/ - pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - - pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; - dce110_fill_display_configs(context, pp_display_cfg); - /* TODO: is this still applicable?*/ - if (pp_display_cfg->display_count == 1) { - const struct dc_crtc_timing *timing = - &context->streams[0]->timing; - - pp_display_cfg->crtc_index = - pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; - } - if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c index 0267644717b2..a39641a0ff09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - int dprefclk_wdivider; - int dp_ref_clk_khz; - int target_div; + struct dc_context *ctx = clk_mgr_base->ctx; + int dp_ref_clk_khz = 0; - /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ - - /* Read the mmDENTIST_DISPCLK_CNTL to get the currently - * programmed DID 
DENTIST_DPREFCLK_WDIVIDER*/ - REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); - - /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ - target_div = dentist_get_divider_from_did(dprefclk_wdivider); - - /* Calculate the current DFS clock, in kHz.*/ - dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; + if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev)) + dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency; + else + dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9ab0ee20ca6f..dcc48b5238e5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -217,11 +217,24 @@ static bool create_links( connectors_num, num_virtual_links); - // condition loop on link_count to allow skipping invalid indices + /* When getting the number of connectors, the VBIOS reports the number of valid indices, + * but it doesn't say which indices are valid, and not every index has an actual connector. + * So, if we don't find a connector on an index, that is not an error. + * + * - There is no guarantee that the first N indices will be valid + * - VBIOS may report a higher amount of valid indices than there are actual connectors + * - Some VBIOS have valid configurations for more connectors than there actually are + * on the card. This may be because the manufacturer used the same VBIOS for different + * variants of the same card. 
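[Illustrative aside, not part of the patch] The dce100 and dce60 update_clocks hunks above make the same substitution: the old "pad the requested display clock by 15% unless DFS bypass is active" workaround is dropped in favor of clamping the request to the PERFORMANCE state's maximum. A minimal sketch of the before/after arithmetic, with hypothetical clock values standing in for the state-dependent clock tables:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical values, in kHz; the real ones come from the
	 * max_clks_by_state table in the clk_mgr. */
	const int max_disp_clk = 625000;   /* ClocksStatePerformance */
	int requested = 600000;            /* bw_ctx.bw.dce.dispclk_khz */

	/* Old behaviour: scale the request by a fudge factor, which
	 * could push it past what the state table allows. */
	int old_patched = requested * 115 / 100;

	/* New behaviour: never exceed the PERFORMANCE state's clock. */
	int new_patched = MIN(max_disp_clk, requested);

	printf("old=%d kHz, new=%d kHz\n", old_patched, new_patched);
	return 0;
}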
+ */ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { + struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i); struct link_init_data link_init_params = {0}; struct dc_link *link; + if (connector_id.id == CONNECTOR_ID_UNKNOWN) + continue; + DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); link_init_params.ctx = dc->ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 4a9d07c31bc5..0c50fe266c8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -896,13 +896,13 @@ void dce110_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); @@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index e7a318e26d38..fcd3d86ad517 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -4,7 +4,6 @@ #include "dc.h" #include "dc_dmub_srv.h" -#include "dc_dp_types.h" #include "dmub/dmub_srv.h" #include "core_types.h" #include "dmub_replay.h" @@ -44,45 +43,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s /* * Enable/Disable Replay. 
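[Illustrative aside, not part of the patch] The new comment and loop in create_links() encode the rule that connector indices reported by the VBIOS may be sparse: condition on the count of links found, skip unknown indices, and treat a miss as normal rather than an error. A standalone sketch of that enumeration shape, with a hypothetical get_connector_id() and table values standing in for the bios_parser call:

#include <stdio.h>

#define CONNECTOR_ID_UNKNOWN 0
#define MAX_LINKS 8

/* Hypothetical sparse connector table: two valid entries, gaps elsewhere. */
static int get_connector_id(int index)
{
	static const int table[MAX_LINKS] = { 0, 3, 0, 0, 5, 0, 0, 0 };

	return table[index];
}

int main(void)
{
	int connectors_num = 2;	/* what the VBIOS claims to provide */
	int link_count = 0;

	/* Loop condition uses link_count, so invalid indices are skipped
	 * rather than treated as errors, as the dc change above does. */
	for (int i = 0; link_count < connectors_num && i < MAX_LINKS; i++) {
		int id = get_connector_id(i);

		if (id == CONNECTOR_ID_UNKNOWN)
			continue;

		printf("link %d -> connector index %d (id %d)\n",
		       link_count, i, id);
		link_count++;
	}
	return 0;
}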
*/ -static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst, - struct dc_link *link) +static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; uint32_t retry_count; enum replay_state state = REPLAY_STATE_0; - struct pipe_ctx *pipe_ctx = NULL; - struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; - uint8_t i; memset(&cmd, 0, sizeof(cmd)); cmd.replay_enable.header.type = DMUB_CMD__REPLAY; cmd.replay_enable.data.panel_inst = panel_inst; cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE; - if (enable) { + if (enable) cmd.replay_enable.data.enable = REPLAY_ENABLE; - // hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay - if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { - for (i = 0; i < MAX_PIPES; i++) { - if (res_ctx && - res_ctx->pipe_ctx[i].stream && - res_ctx->pipe_ctx[i].stream->link && - res_ctx->pipe_ctx[i].stream->link == link && - res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { - pipe_ctx = &res_ctx->pipe_ctx[i]; - //TODO: refactor for multi edp support - break; - } - } - - if (!pipe_ctx) - return; - - cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; - } - } else + else cmd.replay_enable.data.enable = REPLAY_DISABLE; cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); @@ -174,17 +149,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, copy_settings_data->digbe_inst = replay_context->digbe_inst; copy_settings_data->digfe_inst = replay_context->digfe_inst; - if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) { - if (pipe_ctx->stream_res.hpo_dp_stream_enc) - copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - else - copy_settings_data->hpo_stream_enc_inst = 0; - if (pipe_ctx->link_res.hpo_dp_link_enc) - copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst; - else - copy_settings_data->hpo_link_enc_inst = 0; - } - if (pipe_ctx->plane_res.dpp) copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst; else @@ -247,7 +211,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, pCmd->header.type = DMUB_CMD__REPLAY; pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL; pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); - pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst; pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF); pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index ccbe385e132c..e6346c0ffc0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -19,7 +19,7 @@ struct dmub_replay_funcs { void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst); void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait, - uint8_t panel_inst, struct dc_link *link); + uint8_t panel_inst); bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link, struct replay_context *replay_context, 
uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index e7927b8f5ba3..98ec9b5a559c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -944,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, // TODO: Handle mux change case if force_static is set // If force_static is set, just change the replay_allow_active state directly if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link); + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); link->replay_settings.replay_allow_active = *allow_active; } diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index c587b3441e07..6a69a788abe8 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -4048,14 +4048,6 @@ struct dmub_cmd_replay_copy_settings_data { */ uint8_t digbe_inst; /** - * @hpo_stream_enc_inst: HPO stream encoder instance - */ - uint8_t hpo_stream_enc_inst; - /** - * @hpo_link_enc_inst: HPO link encoder instance - */ - uint8_t hpo_link_enc_inst; - /** * AUX HW instance. */ uint8_t aux_inst; @@ -4159,18 +4151,6 @@ struct dmub_rb_cmd_replay_enable_data { * This does not support HDMI/DP2 for now. */ uint8_t phy_rate; - /** - * @hpo_stream_enc_inst: HPO stream encoder instance - */ - uint8_t hpo_stream_enc_inst; - /** - * @hpo_link_enc_inst: HPO link encoder instance - */ - uint8_t hpo_link_enc_inst; - /** - * @pad: Align structure to 4 byte boundary. 
- */ - uint8_t pad[2]; }; /** diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index e58e7b93810b..6b7db8ec9a53 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_FAILURE; } + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; mutex_lock(&psp->hdcp_context.mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 3aea32baea3d..f32474af90b3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1697,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *min_power_limit) { struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; PPTable_t *pptable = table_context->driver_pptable; CustomSkuTable_t *skutable = &pptable->CustomSkuTable; - uint32_t power_limit; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; if (smu_v14_0_get_current_power_limit(smu, &power_limit)) @@ -1712,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (max_power_limit) - *max_power_limit = msg_limit; + if (powerplay_table) { + if (smu->od_enabled && + smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); + + if (max_power_limit) { + *max_power_limit = msg_limit * (100 + od_percent_upper); + *max_power_limit /= 100; + } - if (min_power_limit) - *min_power_limit = 0; + if (min_power_limit) { + *min_power_limit = power_limit * (100 + od_percent_lower); + *min_power_limit /= 100; + } return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index ed35e567d117..efe534977d12 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1474,8 +1474,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge, &analogix_dp_bridge_funcs); - if (!dp) - return ERR_PTR(-ENOMEM); + if (IS_ERR(dp)) + return ERR_CAST(dp); dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index b63304d3a80f..b3e4cdff61d6 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev) { struct auxiliary_device *adev = to_auxiliary_dev(dev); + 
of_node_put(dev->of_node); ida_free(&drm_aux_bridge_ida, adev->id); kfree(adev); @@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent) ret = auxiliary_device_init(adev); if (ret) { + of_node_put(adev->dev.of_node); ida_free(&drm_aux_bridge_ida, adev->id); kfree(adev); return ret; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index dd45d9b504d8..4bde00083047 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1227,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check); /** * drm_bridge_detect - check if anything is attached to the bridge output * @bridge: bridge control structure + * @connector: attached connector * * If the bridge supports output detection, as reported by the * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index bbc7fecb6f4a..74d949995a72 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -2432,6 +2432,8 @@ static const struct drm_gpuvm_ops lock_ops = { * * The expected usage is: * + * .. code-block:: c + * * vm_bind { * struct drm_exec exec; * diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 09a9b452e8b7..50c286c5cee8 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -381,6 +381,26 @@ struct DecFifo { len: usize, } +// On arm32 architecture, dividing an `u64` by a constant will generate a call +// to `__aeabi_uldivmod` which is not present in the kernel. +// So use the multiply by inverse method for this architecture. +fn div10(val: u64) -> u64 { + if cfg!(target_arch = "arm") { + let val_h = val >> 32; + let val_l = val & 0xFFFFFFFF; + let b_h: u64 = 0x66666666; + let b_l: u64 = 0x66666667; + + let tmp1 = val_h * b_l + ((val_l * b_l) >> 32); + let tmp2 = val_l * b_h + (tmp1 & 0xffffffff); + let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32); + + tmp3 >> 2 + } else { + val / 10 + } +} + impl DecFifo { fn push(&mut self, data: u64, len: usize) { let mut chunk = data; @@ -389,7 +409,7 @@ impl DecFifo { } for i in 0..len { self.decimals[i] = (chunk % 10) as u8; - chunk /= 10; + chunk = div10(chunk); } self.len += len; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c index 74f7832ea53e..0726cb5b736e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c @@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp) return hibmc_dp_link_reduce_rate(dp); } +static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp) +{ + dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; + if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate) + dp->link.cap.link_rate = DP_LINK_BW_8_1; + + dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX) + dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX; +} + int hibmc_dp_link_training(struct hibmc_dp_dev *dp) { struct hibmc_dp_link *link = &dp->link; @@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp) if (ret) drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret); - dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; - dp->link.cap.lanes = 0x2; + hibmc_dp_update_caps(dp); ret = hibmc_dp_get_serdes_rate_cfg(dp); if (ret < 0) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c 
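[Illustrative aside, not part of the patch] The div10() added to drm_panic_qr.rs above avoids the __aeabi_uldivmod libcall on 32-bit arm by multiplying by an inverse constant: 0x6666666666666667 is ceil(2^66 / 10), so the top 64 bits of the 128-bit product, shifted right by 2, equal val / 10. The same arithmetic transcribed to C, kept to 32x32->64 multiplies as in the Rust version, with a check against plain division:

#include <stdint.h>
#include <stdio.h>

static uint64_t div10(uint64_t val)
{
	uint64_t val_h = val >> 32;
	uint64_t val_l = val & 0xffffffff;
	uint64_t b_h = 0x66666666;	/* high half of ceil(2^66 / 10) */
	uint64_t b_l = 0x66666667;	/* low half */

	/* Accumulate the high 64 bits of the 128-bit product val * b. */
	uint64_t tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
	uint64_t tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
	uint64_t tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);

	return tmp3 >> 2;	/* >> 66 in total: floor(val / 10) */
}

int main(void)
{
	uint64_t tests[] = { 0, 9, 10, 12345, 0xffffffffull,
			     0xffffffffffffffffull };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%llu / 10 = %llu (expect %llu)\n",
		       (unsigned long long)tests[i],
		       (unsigned long long)div10(tests[i]),
		       (unsigned long long)(tests[i] / 10));
	return 0;
}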
index 768b97f9e74a..289304500ab0 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -32,7 +32,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops); -static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" }; +static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" }; static irqreturn_t hibmc_interrupt(int irq, void *arg) { @@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = { static int hibmc_kms_init(struct hibmc_drm_private *priv) { struct drm_device *dev = &priv->dev; + struct drm_encoder *encoder; + u32 clone_mask = 0; int ret; ret = drmm_mode_config_init(dev); @@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) return ret; } + drm_for_each_encoder(encoder, dev) + clone_mask |= drm_encoder_mask(encoder); + + drm_for_each_encoder(encoder, dev) + encoder->possible_clones = clone_mask; + return 0; } @@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev) static int hibmc_msi_init(struct drm_device *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); - char name[32] = {0}; int valid_irq_num; int irq; int ret; @@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev) valid_irq_num = ret; for (int i = 0; i < valid_irq_num; i++) { - snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s", - dev->driver->name, pci_name(pdev), g_irqs_names_map[i]); - irq = pci_irq_vector(pdev, i); if (i) @@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev) ret = devm_request_threaded_irq(&pdev->dev, irq, hibmc_dp_interrupt, hibmc_dp_hpd_isr, - IRQF_SHARED, name, dev); + IRQF_SHARED, g_irqs_names_map[i], dev); else ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt, - IRQF_SHARED, name, dev); + IRQF_SHARED, g_irqs_names_map[i], dev); if (ret) { drm_err(dev, "install irq failed: %d\n", ret); return ret; @@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev) ret = hibmc_hw_init(priv); if (ret) - goto err; + return ret; ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (ret) { drm_err(dev, "Error initializing VRAM MM; %d\n", ret); - goto err; + return ret; } ret = hibmc_kms_init(priv); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 274feabe7df0..ca8502e2760c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector); +void hibmc_ddc_del(struct hibmc_vdac *vdac); int hibmc_dp_init(struct hibmc_drm_private *priv); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c index 99b3b77b5445..44860011855e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac) return i2c_bit_add_bus(&vdac->adapter); } + +void hibmc_ddc_del(struct hibmc_vdac *vdac) +{ + i2c_del_adapter(&vdac->adapter); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index e8a527ede854..841e81f47b68 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ 
-53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector) { struct hibmc_vdac *vdac = to_hibmc_vdac(connector); - i2c_del_adapter(&vdac->adapter); + hibmc_ddc_del(vdac); drm_connector_cleanup(connector); } @@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL); if (ret) { drm_err(dev, "failed to init encoder: %d\n", ret); - return ret; + goto err; } drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); @@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) &vdac->adapter); if (ret) { drm_err(dev, "failed to init connector: %d\n", ret); - return ret; + goto err; } drm_connector_helper_add(connector, &hibmc_connector_helper_funcs); @@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; + +err: + hibmc_ddc_del(vdac); + + return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index fb25ec8adae3..68157f177b6a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl) if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; + intel_display_rpm_assert_block(display); + iir = intel_de_read(display, GEN11_GU_MISC_IIR); if (likely(iir)) intel_de_write(display, GEN11_GU_MISC_IIR, iir); + intel_display_rpm_assert_unblock(display); + return iir; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 6e26cb4c5724..685ac98bd001 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -552,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc) if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); - - /* wa_18038517565 Enable DPFC clock gating after FBC disable */ - if (display->platform.dg2 || DISPLAY_VER(display) >= 14) - fbc_compressor_clkgate_disable_wa(fbc, false); } } @@ -1710,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc) __intel_fbc_cleanup_cfb(fbc); + /* wa_18038517565 Enable DPFC clock gating after FBC disable */ + if (display->platform.dg2 || DISPLAY_VER(display) >= 14) + fbc_compressor_clkgate_disable_wa(fbc, false); + fbc->state.plane = NULL; fbc->flip_pending = false; fbc->busy_bits = 0; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index ae9053919211..41988e193a41 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -3275,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp) static void _psr_invalidate_handle(struct intel_dp *intel_dp) { - if (intel_dp->psr.psr2_sel_fetch_enabled) { + struct intel_display *display = to_intel_display(intel_dp); + + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) { intel_dp->psr.psr2_sel_fetch_cff_enabled = true; intel_psr_configure_full_frame_update(intel_dp); @@ -3361,7 +3363,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - if (intel_dp->psr.psr2_sel_fetch_enabled) { + if (DISPLAY_VER(display) < 20 && 
intel_dp->psr.psr2_sel_fetch_enabled) { if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* can we turn CFF off? */ if (intel_dp->psr.busy_frontbuffer_bits == 0) @@ -3378,11 +3380,13 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) * existing SU configuration */ intel_psr_configure_full_frame_update(intel_dp); - } - intel_psr_force_update(intel_dp); + intel_psr_force_update(intel_dp); + } else { + intel_psr_exit(intel_dp); + } - if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active && + if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) && !intel_dp->psr.busy_frontbuffer_bits) queue_work(display->wq.unordered, &intel_dp->psr.work); } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 3bc57579fe53..668ef139391b 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,6 +23,7 @@ #include "intel_modeset_lock.h" #include "intel_tc.h" +#define DP_PIN_ASSIGNMENT_NONE 0x0 #define DP_PIN_ASSIGNMENT_C 0x3 #define DP_PIN_ASSIGNMENT_D 0x4 #define DP_PIN_ASSIGNMENT_E 0x5 @@ -66,6 +67,7 @@ struct intel_tc_port { enum tc_port_mode init_mode; enum phy_fia phy_fia; u8 phy_fia_idx; + u8 max_lane_count; }; static enum intel_display_power_domain @@ -307,6 +309,8 @@ static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val); switch (pin_assignment) { + case DP_PIN_ASSIGNMENT_NONE: + return 0; default: MISSING_CASE(pin_assignment); fallthrough; @@ -365,12 +369,12 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) } } -int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) +static int get_max_lane_count(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - struct intel_tc_port *tc = to_tc_port(dig_port); + struct intel_display *display = to_intel_display(tc->dig_port); + struct intel_digital_port *dig_port = tc->dig_port; - if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) + if (tc->mode != TC_PORT_DP_ALT) return 4; assert_tc_cold_blocked(tc); @@ -384,6 +388,25 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) return intel_tc_port_get_max_lane_count(dig_port); } +static void read_pin_configuration(struct intel_tc_port *tc) +{ + tc->max_lane_count = get_max_lane_count(tc); +} + +int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) +{ + struct intel_display *display = to_intel_display(dig_port); + struct intel_tc_port *tc = to_tc_port(dig_port); + + if (!intel_encoder_is_tc(&dig_port->base)) + return 4; + + if (DISPLAY_VER(display) < 20) + return get_max_lane_count(tc); + + return tc->max_lane_count; +} + void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { @@ -596,9 +619,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + __tc_cold_unblock(tc, domain, tc_cold_wref); } @@ -656,8 +682,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if ((!tc_phy_is_ready(tc) || 
!icl_tc_phy_take_ownership(tc, true)) && @@ -668,6 +697,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, goto out_unblock_tc_cold; } + read_pin_configuration(tc); if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -858,9 +888,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) port_wakeref = intel_display_power_get(display, port_power_domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + intel_display_power_put(display, port_power_domain, port_wakeref); } @@ -873,6 +906,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) if (tc->mode == TC_PORT_TBT_ALT) { tc->lock_wakeref = tc_cold_block(tc); + + read_pin_configuration(tc); + return true; } @@ -894,6 +930,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_unblock_tc_cold; @@ -1124,9 +1162,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + /* + * Set a valid lane count value for a DP-alt sink which got + * disconnected. The driver can only disable the output on this PHY. + */ + if (tc->max_lane_count == 0) + tc->max_lane_count = 4; + } + drm_WARN_ON(display->drm, (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && !xelpdp_tc_phy_tcss_power_is_enabled(tc)); @@ -1138,14 +1185,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) goto out_unblock_tccold; xelpdp_tc_phy_take_ownership(tc, true); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -1226,14 +1278,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc) tc->phy_ops->get_hw_state(tc); } -static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, - bool phy_is_ready, bool phy_is_owned) +/* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? 
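[Illustrative aside, not part of the patch] The intel_tc changes above do two related things: decode an unassigned Type-C pin configuration (DP_PIN_ASSIGNMENT_NONE) as zero lanes, and sample the lane count once at connect time via read_pin_configuration() so later queries are served from a cached field instead of live register reads. A toy model of the same idea; note the assignment-to-lane mapping below (C/E carrying four lanes, D two) follows the usual DP alt-mode convention and is an assumption here, since the diff only shows the NONE case:

#include <stdio.h>

#define PIN_ASSIGNMENT_NONE 0x0
#define PIN_ASSIGNMENT_C    0x3
#define PIN_ASSIGNMENT_D    0x4
#define PIN_ASSIGNMENT_E    0x5

struct tc_port {
	int max_lane_count;	/* cached at connect, like the new field */
};

static int decode_max_lanes(int pin_assignment)
{
	switch (pin_assignment) {
	case PIN_ASSIGNMENT_NONE:
		return 0;	/* nothing assigned: output can only be disabled */
	case PIN_ASSIGNMENT_D:
		return 2;	/* assumed: half the pins carry USB */
	case PIN_ASSIGNMENT_C:
	case PIN_ASSIGNMENT_E:
	default:
		return 4;
	}
}

/* Mirror of read_pin_configuration(): sample once while the PHY is
 * accessible, then serve later queries from the cache. */
static void connect(struct tc_port *tc, int live_pin_assignment)
{
	tc->max_lane_count = decode_max_lanes(live_pin_assignment);
}

int main(void)
{
	struct tc_port tc;

	connect(&tc, PIN_ASSIGNMENT_D);
	printf("max lanes: %d\n", tc.max_lane_count);
	return 0;
}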
*/ +static bool tc_phy_owned_by_display(struct intel_tc_port *tc, + bool phy_is_ready, bool phy_is_owned) { struct intel_display *display = to_intel_display(tc->dig_port); - drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); + if (DISPLAY_VER(display) < 20) { + drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); - return phy_is_ready && phy_is_owned; + return phy_is_ready && phy_is_owned; + } else { + return phy_is_owned; + } } static bool tc_phy_is_connected(struct intel_tc_port *tc, @@ -1244,7 +1301,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, bool phy_is_owned = tc_phy_is_owned(tc); bool is_connected; - if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) + if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; else is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; @@ -1352,7 +1409,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc) phy_is_ready = tc_phy_is_ready(tc); phy_is_owned = tc_phy_is_owned(tc); - if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { + if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) { mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); } else { drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT); @@ -1441,11 +1498,11 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc, intel_display_power_flush_work(display); if (!intel_tc_cold_requires_aux_pw(dig_port)) { enum intel_display_power_domain aux_domain; - bool aux_powered; aux_domain = intel_aux_power_domain(dig_port); - aux_powered = intel_display_power_is_enabled(display, aux_domain); - drm_WARN_ON(display->drm, aux_powered); + if (intel_display_power_is_enabled(display, aux_domain)) + drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n", + tc->port_name); } tc_phy_disconnect(tc); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index b37e400f74e5..5a95f06900b5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -634,6 +634,8 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + struct drm_i915_private *i915 = engine->i915; + /* Wa_1406697149 (WaDisableBankHangMode:icl) */ wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL); @@ -669,6 +671,15 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_1406306137:icl,ehl */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); + + if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { + /* + * Disable Repacking for Compression (masked R/W access) + * before rendering compressed surfaces for display. + */ + wa_masked_en(wal, CACHE_MODE_0_GEN7, + DISABLE_REPACKING_FOR_COMPRESSION); + } } /* @@ -2306,15 +2317,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_RC_SEMA_IDLE_MSG_DISABLE); } - if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { - /* - * "Disable Repacking for Compression (masked R/W access) - * before rendering compressed surfaces for display." - */ - wa_masked_en(wal, CACHE_MODE_0_GEN7, - DISABLE_REPACKING_FOR_COMPRESSION); - } - if (GRAPHICS_VER(i915) == 11) { /* This is not an Wa. 
Enable for better image quality */ wa_masked_en(wal, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index e1e542126310..805d0a87aa54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, int nouveau_framebuffer_new(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *gem, struct drm_framebuffer **pfb) @@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev, struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct drm_framebuffer *fb; - const struct drm_format_info *info; unsigned int height, i; uint32_t tile_mode; uint8_t kind; @@ -295,9 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev, kind = nvbo->kind; } - info = drm_get_format_info(dev, mode_cmd->pixel_format, - mode_cmd->modifier[0]); - for (i = 0; i < info->num_planes; i++) { height = drm_format_info_plane_height(info, mode_cmd->height, @@ -321,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev, if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL))) return -ENOMEM; - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); fb->obj[0] = gem; ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); @@ -344,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, if (!gem) return ERR_PTR(-ENOENT); - ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb); + ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb); if (ret == 0) return fb; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index e45f211501f6..470e0910d484 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -8,8 +8,11 @@ #include <drm/drm_framebuffer.h> +struct drm_format_info; + int nouveau_framebuffer_new(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *gem, struct drm_framebuffer **pfb); diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index edbbda78bac9..c4949e815eb3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -60,14 +60,14 @@ * virtual address in the GPU's VA space there is no guarantee that the actual * mappings are created in the GPU's MMU. If the given memory is swapped out * at the time the bind operation is executed the kernel will stash the mapping - * details into it's internal alloctor and create the actual MMU mappings once + * details into it's internal allocator and create the actual MMU mappings once * the memory is swapped back in. While this is transparent for userspace, it is * guaranteed that all the backing memory is swapped back in and all the memory * mappings, as requested by userspace previously, are actually mapped once the * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job. * * A VM_BIND job can be executed either synchronously or asynchronously. If - * exectued asynchronously, userspace may provide a list of syncobjs this job + * executed asynchronously, userspace may provide a list of syncobjs this job * will wait for and/or a list of syncobj the kernel will signal once the * VM_BIND job finished execution. 
If executed synchronously the ioctl will * block until the bind job is finished. For synchronous jobs the kernel will @@ -82,7 +82,7 @@ * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have * an up to date view of the VA space. However, the actual mappings might still * be pending. Hence, EXEC jobs require to have the particular fences - of - * the corresponding VM_BIND jobs they depent on - attached to them. + * the corresponding VM_BIND jobs they depend on - attached to them. */ static int diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c index 99296f03371a..07c1ebc2a941 100644 --- a/drivers/gpu/drm/nouveau/nvif/vmm.c +++ b/drivers/gpu/drm/nouveau/nvif/vmm.c @@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break; default: WARN_ON(1); - return -EINVAL; + ret = -EINVAL; + goto done; } memcpy(args->data, argv, argc); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c index 9d06ff722fea..0dc4782df8c0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries); if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); + kvfree(buf); return rpc; } @@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) rpc = r535_gsp_msgq_recv_one_elem(gsp, &info); if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); + kvfree(buf); return rpc; } diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs index 7e59a34b830d..4fe62cf98a23 100644 --- a/drivers/gpu/drm/nova/file.rs +++ b/drivers/gpu/drm/nova/file.rs @@ -39,7 +39,8 @@ impl File { _ => return Err(EINVAL), }; - getparam.set_value(value); + #[allow(clippy::useless_conversion)] + getparam.set_value(value.into()); Ok(0) } diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 30c81e2e5d6b..bb3105556f19 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -351,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, } } - fb = omap_framebuffer_init(dev, mode_cmd, bos); + fb = omap_framebuffer_init(dev, info, mode_cmd, bos); if (IS_ERR(fb)) goto error; @@ -365,9 +365,9 @@ error: } struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { - const struct drm_format_info *format = NULL; struct omap_framebuffer *omap_fb = NULL; struct drm_framebuffer *fb = NULL; unsigned int pitch = mode_cmd->pitches[0]; @@ -377,15 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, dev, mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); - format = drm_get_format_info(dev, mode_cmd->pixel_format, - mode_cmd->modifier[0]); - for (i = 0; i < ARRAY_SIZE(formats); i++) { if (formats[i] == mode_cmd->pixel_format) break; } - if (!format || i == ARRAY_SIZE(formats)) { + if (i == ARRAY_SIZE(formats)) { dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n", (char *)&mode_cmd->pixel_format); ret = -EINVAL; @@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, } fb = &omap_fb->base; - omap_fb->format = format; + omap_fb->format = info; 
mutex_init(&omap_fb->lock); /* @@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, * that the two planes of multiplane formats need the same number of * bytes per pixel. */ - if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) { + if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) { dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n"); ret = -EINVAL; goto fail; } - if (pitch % format->cpp[0]) { + if (pitch % info->cpp[0]) { dev_dbg(dev->dev, "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n", - pitch, format->cpp[0]); + pitch, info->cpp[0]); ret = -EINVAL; goto fail; } - for (i = 0; i < format->num_planes; i++) { + for (i = 0; i < info->num_planes; i++) { struct plane *plane = &omap_fb->planes[i]; - unsigned int vsub = i == 0 ? 1 : format->vsub; + unsigned int vsub = i == 0 ? 1 : info->vsub; unsigned int size; size = pitch * mode_cmd->height / vsub; @@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, plane->dma_addr = 0; } - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); if (ret) { diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h index 0873f953cf1d..e6010302a22b 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.h +++ b/drivers/gpu/drm/omapdrm/omap_fb.h @@ -13,6 +13,7 @@ struct drm_connector; struct drm_device; struct drm_file; struct drm_framebuffer; +struct drm_format_info; struct drm_gem_object; struct drm_mode_fb_cmd2; struct drm_plane_state; @@ -23,6 +24,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 7b6396890681..948af7ec1130 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fb = omap_framebuffer_init(dev, &mode_cmd, &bo); + fb = omap_framebuffer_init(dev, + drm_get_format_info(dev, mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd, &bo); if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); /* note: if fb creation failed, we can't rely on fb destroy diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index bb73f2a68a12..85d6289a6eda 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo, if (!refcount) return; - resident_size = bo->base.pages ? 
bo->base.base.size : 0; + resident_size = panfrost_gem_rss(&bo->base.base); snprintf(creator_info, sizeof(creator_info), "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b4bf5dfeea2d..4dc77c398617 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; fb->obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); if (ret) { fb->obj[0] = NULL; @@ -1341,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); + ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj); if (ret) { kfree(fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index e3a481bbee7b..dc81b0c2dbff 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj) } static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { - const struct drm_format_info *info; struct radeon_device *rdev = fb_helper->dev->dev_private; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; @@ -67,8 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, int height = mode_cmd->height; u32 cpp; - info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format, - mode_cmd->modifier[0]); cpp = info->cpp[0]; /* need to align pitch with crtc limits */ @@ -206,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = fb_helper->dev->dev_private; + const struct drm_format_info *format_info; struct drm_mode_fb_cmd2 mode_cmd = { }; struct fb_info *info; struct drm_gem_object *gobj; @@ -224,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); - ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj); + format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format, + mode_cmd.modifier[0]); + ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; @@ -236,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, ret = -ENOMEM; goto err_radeon_fbdev_destroy_pinned_object; } - ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj); + ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto err_kfree; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3102f6c2d055..9e34da2cacef 100644 --- 
a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -40,6 +40,7 @@ struct drm_fb_helper; struct drm_fb_helper_surface_size; +struct drm_format_info; struct edid; struct drm_edid; @@ -890,6 +891,7 @@ extern void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *rfb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index ab525668939a..faf50d872be3 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -53,6 +53,7 @@ config ROCKCHIP_CDN_DP bool "Rockchip cdn DP" depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) select DRM_DISPLAY_HELPER + select DRM_BRIDGE_CONNECTOR select DRM_DISPLAY_DP_HELPER help This selects support for Rockchip SoC specific extensions diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 186f6452a7d3..b50927a824b4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2) } /* - * The window registers are only updated when config done is written. - * Until that they read back the old value. As we read-modify-write - * these registers mark them as non-volatile. This makes sure we read - * the new values from the regmap register cache. + * The window and video port registers are only updated when config + * done is written. Until that they read back the old value. As we + * read-modify-write these registers mark them as non-volatile. This + * makes sure we read the new values from the regmap register cache. 
*/ static const struct regmap_range vop2_nonvolatile_range[] = { + regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255), regmap_reg_range(0x1000, 0x23ff), }; diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 7299fa8971ce..981dada8f3a8 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) NULL : &result->dst_pitch; drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state); - buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); buf = dst.vaddr; /* restore original value of buf */ memset(buf, 0, dst_size); drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h index ce05b6ae832f..880140d6ccdc 100644 --- a/drivers/gpu/drm/xe/regs/xe_bars.h +++ b/drivers/gpu/drm/xe/regs/xe_bars.h @@ -7,5 +7,6 @@ #define GTTMMADR_BAR 0 /* MMIO + GTT */ #define LMEM_BAR 2 /* VRAM */ +#define VF_LMEM_BAR 9 /* VF VRAM */ #endif diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index f08fc4377d25..c17ed1ae8649 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -332,6 +332,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe int ret = 0; u32 reg_val, max; struct xe_reg rapl_limit; + u64 max_supp_power_limit = 0; mutex_lock(&hwmon->hwmon_lock); @@ -356,6 +357,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe goto unlock; } + /* + * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to + * the supported maximum (U12.3 format). + * This is to avoid truncation during the reg_val calculation below and ensure a valid + * power limit is sent to pcode, which would clamp it to the card-supported value. + */ + max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER; + if (value > max_supp_power_limit) { + value = max_supp_power_limit; + drm_info(&hwmon->xe->drm, + "Power limit clamped as selected %s exceeds channel %d limit\n", + PWR_ATTR_TO_STR(attr), channel); + } + /* Computation in 64-bits to avoid overflow. Round to nearest. */ reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER); @@ -739,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, { int ret; u32 uval; + u64 max_crit_power_curr = 0; mutex_lock(&hwmon->hwmon_lock); + /* + * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1 + * max supported value, clamp it to the command's max (U10.6 format). + * This is to avoid truncation during the uval calculation below and ensure a valid power + * limit is sent to pcode, which would clamp it to the card-supported value.
+ */ + max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor; + if (value > max_crit_power_curr) { + value = max_crit_power_curr; + drm_info(&hwmon->xe->drm, + "Power limit clamped as selected exceeds channel %d limit\n", + channel); + } uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor); ret = xe_hwmon_pcode_write_i1(hwmon, uval); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index ba1cff2e4cda..84f412fd3c5d 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) /* Special layout, prepared below.. */ vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION | - XE_VM_FLAG_SET_TILE_ID(tile)); + XE_VM_FLAG_SET_TILE_ID(tile), NULL); if (IS_ERR(vm)) return ERR_CAST(vm); @@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { int buf_offset = 0; + void *bounce; + int err; + + BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES)); + bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL); + if (!bounce) + return -ENOMEM; /* * Less than ideal for large unaligned access but this should be * fairly rare, can fixup if this becomes common. */ do { - u8 bounce[XE_CACHELINE_BYTES]; - void *ptr = (void *)bounce; - int err; int copy_bytes = min_t(int, bytes_left, XE_CACHELINE_BYTES - (offset & XE_CACHELINE_MASK)); @@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, - (void *)ptr, - sizeof(bounce), 0); + bounce, + XE_CACHELINE_BYTES, 0); if (err) - return err; + break; if (write) { - memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes); + memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes); err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, - (void *)ptr, - sizeof(bounce), write); + bounce, + XE_CACHELINE_BYTES, write); if (err) - return err; + break; } else { - memcpy(buf + buf_offset, ptr + ptr_offset, + memcpy(buf + buf_offset, bounce + ptr_offset, copy_bytes); } @@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, offset += copy_bytes; } while (bytes_left); - return 0; + kfree(bounce); + return err; } dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); @@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, else current_bytes = min_t(int, bytes_left, cursor.size); - if (fence) - dma_fence_put(fence); + if (current_bytes & ~PAGE_MASK) { + int pitch = 4; + + current_bytes = min_t(int, current_bytes, S16_MAX * pitch); + } __fence = xe_migrate_vram(m, current_bytes, (unsigned long)buf & ~PAGE_MASK, @@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, XE_MIGRATE_COPY_TO_VRAM : XE_MIGRATE_COPY_TO_SRAM); if (IS_ERR(__fence)) { - if (fence) + if (fence) { dma_fence_wait(fence, false); + dma_fence_put(fence); + } fence = __fence; goto out_err; } + + dma_fence_put(fence); fence = __fence; buf += current_bytes; diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index 447a7867eecb..af05db07162e 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -3,6 +3,10 @@ * Copyright Ā© 2023-2024 Intel Corporation */ +#include <linux/bitops.h> +#include <linux/pci.h> + +#include 
"regs/xe_bars.h" #include "xe_assert.h" #include "xe_device.h" #include "xe_gt_sriov_pf_config.h" @@ -128,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, } } +static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + u32 sizes; + + sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs); + if (!sizes) + return 0; + + return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes)); +} + static int pf_enable_vfs(struct xe_device *xe, int num_vfs) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -158,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) if (err < 0) goto failed; + if (IS_DGFX(xe)) { + err = resize_vf_vram_bar(xe, num_vfs); + if (err) + xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err); + } + err = pci_enable_sriov(pdev, num_vfs); if (err < 0) goto failed; diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c index d92ec0f515b0..ca95f2a4d4ef 100644 --- a/drivers/gpu/drm/xe/xe_pxp_submit.c +++ b/drivers/gpu/drm/xe/xe_pxp_submit.c @@ -101,7 +101,7 @@ static int allocate_gsc_client_resources(struct xe_gt *gt, xe_assert(xe, hwe); /* PXP instructions must be issued from PPGTT */ - vm = xe_vm_create(xe, XE_VM_FLAG_GSC); + vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL); if (IS_ERR(vm)) return PTR_ERR(vm); diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c index 1c3c04d52f55..90244fe59b59 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.c +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea write_unlock(&shrinker->lock); } -static s64 xe_shrinker_walk(struct xe_device *xe, - struct ttm_operation_ctx *ctx, - const struct xe_bo_shrink_flags flags, - unsigned long to_scan, unsigned long *scanned) +static s64 __xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) { unsigned int mem_type; s64 freed = 0, lret; @@ -93,6 +93,48 @@ static s64 xe_shrinker_walk(struct xe_device *xe, return freed; } +/* + * Try shrinking idle objects without writeback first, then if not sufficient, + * try also non-idle objects and finally if that's not sufficient either, + * add writeback. This avoids stalls and explicit writebacks with light or + * moderate memory pressure. 
+ */ +static s64 xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) +{ + bool no_wait_gpu = true; + struct xe_bo_shrink_flags save_flags = flags; + s64 lret, freed; + + swap(no_wait_gpu, ctx->no_wait_gpu); + save_flags.writeback = false; + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); + swap(no_wait_gpu, ctx->no_wait_gpu); + if (lret < 0 || *scanned >= to_scan) + return lret; + + freed = lret; + if (!ctx->no_wait_gpu) { + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); + if (lret < 0) + return lret; + freed += lret; + if (*scanned >= to_scan) + return freed; + } + + if (flags.writeback) { + lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned); + if (lret < 0) + return lret; + freed += lret; + } + + return freed; +} + static unsigned long xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) { @@ -199,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup); shrink_flags.purge = false; + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, nr_to_scan, &nr_scanned); if (lret >= 0) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 2035604121e6..ec04bef8ae40 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1640,7 +1640,7 @@ static void xe_vm_free_scratch(struct xe_vm *vm) } } -struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) +struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) { struct drm_gem_object *vm_resv_obj; struct xe_vm *vm; @@ -1661,9 +1661,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) vm->xe = xe; vm->size = 1ull << xe->info.va_bits; - vm->flags = flags; + if (xef) + vm->xef = xe_file_get(xef); /** * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be * manipulated under the PXP mutex. 
However, the PXP mutex can be taken @@ -1794,6 +1795,20 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) if (number_tiles > 1) vm->composite_fence_ctx = dma_fence_context_alloc(1); + if (xef && xe->info.has_asid) { + u32 asid; + + down_write(&xe->usm.lock); + err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, + XA_LIMIT(1, XE_MAX_ASID - 1), + &xe->usm.next_asid, GFP_KERNEL); + up_write(&xe->usm.lock); + if (err < 0) + goto err_unlock_close; + + vm->usm.asid = asid; + } + trace_xe_vm_create(vm); return vm; @@ -1814,6 +1829,8 @@ err_no_resv: for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); + if (vm->xef) + xe_file_put(vm->xef); kfree(vm); if (flags & XE_VM_FLAG_LR_MODE) xe_pm_runtime_put(xe); @@ -2059,9 +2076,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, struct xe_device *xe = to_xe_device(dev); struct xe_file *xef = to_xe_file(file); struct drm_xe_vm_create *args = data; - struct xe_tile *tile; struct xe_vm *vm; - u32 id, asid; + u32 id; int err; u32 flags = 0; @@ -2097,29 +2113,10 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) flags |= XE_VM_FLAG_FAULT_MODE; - vm = xe_vm_create(xe, flags); + vm = xe_vm_create(xe, flags, xef); if (IS_ERR(vm)) return PTR_ERR(vm); - if (xe->info.has_asid) { - down_write(&xe->usm.lock); - err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, - XA_LIMIT(1, XE_MAX_ASID - 1), - &xe->usm.next_asid, GFP_KERNEL); - up_write(&xe->usm.lock); - if (err < 0) - goto err_close_and_put; - - vm->usm.asid = asid; - } - - vm->xef = xe_file_get(xef); - - /* Record BO memory for VM pagetable created against client */ - for_each_tile(tile, xe, id) - if (vm->pt_root[id]) - xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); - #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM) /* Warning: Security issue - never enable by default */ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); @@ -3421,6 +3418,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, free_bind_ops: if (args->num_binds > 1) kvfree(*bind_ops); + *bind_ops = NULL; return err; } @@ -3527,7 +3525,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct xe_exec_queue *q = NULL; u32 num_syncs, num_ufence = 0; struct xe_sync_entry *syncs = NULL; - struct drm_xe_vm_bind_op *bind_ops; + struct drm_xe_vm_bind_op *bind_ops = NULL; struct xe_vma_ops vops; struct dma_fence *fence; int err; diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 3475a118f666..2f213737c7e5 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -26,7 +26,7 @@ struct xe_sync_entry; struct xe_svm_range; struct drm_exec; -struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags); +struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef); struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id); int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 73747d20df85..91a7b7e7c0c8 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { }; static const struct x86_cpu_id intel_mwait_ids[] __initconst = { - X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL), {} }; 
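The IIO fixes that follow (sca3300 here, as73211 and isl29501 further down) close the same class of hole: a trigger handler pushes a stack-allocated scan buffer to the ring buffer, so any bytes it does not explicitly write (inactive channels, implicit struct padding) can leak kernel stack contents to userspace. A minimal sketch of the corrected pattern, assuming a hypothetical driver with four 16-bit channels; my_trigger_handler and the channel count are illustrative, not taken from these patches:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 chan[4];		/* payload written by the driver */
		aligned_s64 ts;		/* timestamp slot filled by the IIO core */
	} scan = { };			/* zeroes inactive channels and padding */

	/* ... read hardware into scan.chan[] for the active channels ... */

	iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

The = { } initializer is the load-bearing part: it is what the sca3300 and as73211 hunks add, and what the isl29501 hunk gains by converting a raw u32 array into a zero-initialized scan struct.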
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c index bda370c0f660..8380b237831c 100644 --- a/drivers/iio/accel/sca3300.c +++ b/drivers/iio/accel/sca3300.c @@ -477,7 +477,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct sca3300_data *data = iio_priv(indio_dev); int bit, ret, val, i = 0; - IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX); + IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX) = { }; iio_for_each_active_channel(indio_dev, bit) { ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val); diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 6de2abad0197..24f2572c487e 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -1300,7 +1300,7 @@ config RN5T618_ADC config ROHM_BD79124 tristate "Rohm BD79124 ADC driver" - depends on I2C + depends on I2C && GPIOLIB select REGMAP_I2C select IIO_ADC_HELPER help diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 9808df2e9242..4d8c6bafd1c3 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -849,7 +849,7 @@ enum { static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan_spec *chan) { struct device *dev = &st->sd.spi->dev; - struct ad7124_channel *ch = &st->channels[chan->channel]; + struct ad7124_channel *ch = &st->channels[chan->address]; int ret; if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) { @@ -865,8 +865,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan if (ret < 0) return ret; - dev_dbg(dev, "offset for channel %d after zero-scale calibration: 0x%x\n", - chan->channel, ch->cfg.calibration_offset); + dev_dbg(dev, "offset for channel %lu after zero-scale calibration: 0x%x\n", + chan->address, ch->cfg.calibration_offset); } else { ch->cfg.calibration_gain = st->gain_default; @@ -880,8 +880,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan if (ret < 0) return ret; - dev_dbg(dev, "gain for channel %d after full-scale calibration: 0x%x\n", - chan->channel, ch->cfg.calibration_gain); + dev_dbg(dev, "gain for channel %lu after full-scale calibration: 0x%x\n", + chan->address, ch->cfg.calibration_gain); } return 0; @@ -924,7 +924,7 @@ static int ad7124_set_syscalib_mode(struct iio_dev *indio_dev, { struct ad7124_state *st = iio_priv(indio_dev); - st->channels[chan->channel].syscalib_mode = mode; + st->channels[chan->address].syscalib_mode = mode; return 0; } @@ -934,7 +934,7 @@ static int ad7124_get_syscalib_mode(struct iio_dev *indio_dev, { struct ad7124_state *st = iio_priv(indio_dev); - return st->channels[chan->channel].syscalib_mode; + return st->channels[chan->address].syscalib_mode; } static const struct iio_enum ad7124_syscalib_mode_enum = { diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 4413207be28f..683146e83ab2 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -200,7 +200,7 @@ struct ad7173_channel_config { /* * Following fields are used to compare equality. If you * make adaptations in it, you most likely also have to adapt - * ad7173_find_live_config(), too. + * ad7173_is_setup_equal(), too. 
*/ struct_group(config_props, bool bipolar; @@ -561,12 +561,19 @@ static void ad7173_reset_usage_cnts(struct ad7173_state *st) st->config_usage_counter = 0; } -static struct ad7173_channel_config * -ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg) +/** + * ad7173_is_setup_equal - Compare two channel setups + * @cfg1: First channel configuration + * @cfg2: Second channel configuration + * + * Compares all configuration options that affect the registers connected to + * SETUP_SEL, namely CONFIGx, FILTERx, GAINx and OFFSETx. + * + * Returns: true if the setups are identical, false otherwise + */ +static bool ad7173_is_setup_equal(const struct ad7173_channel_config *cfg1, + const struct ad7173_channel_config *cfg2) { - struct ad7173_channel_config *cfg_aux; - int i; - /* * This is just to make sure that the comparison is adapted after * struct ad7173_channel_config was changed. @@ -579,14 +586,22 @@ ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *c u8 ref_sel; })); + return cfg1->bipolar == cfg2->bipolar && + cfg1->input_buf == cfg2->input_buf && + cfg1->odr == cfg2->odr && + cfg1->ref_sel == cfg2->ref_sel; +} + +static struct ad7173_channel_config * +ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg) +{ + struct ad7173_channel_config *cfg_aux; + int i; + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg; - if (cfg_aux->live && - cfg->bipolar == cfg_aux->bipolar && - cfg->input_buf == cfg_aux->input_buf && - cfg->odr == cfg_aux->odr && - cfg->ref_sel == cfg_aux->ref_sel) + if (cfg_aux->live && ad7173_is_setup_equal(cfg, cfg_aux)) return cfg_aux; } return NULL; @@ -1228,7 +1243,7 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct ad7173_state *st = iio_priv(indio_dev); - int i, ret; + int i, j, k, ret; for (i = 0; i < indio_dev->num_channels; i++) { if (test_bit(i, scan_mask)) @@ -1239,6 +1254,54 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev, return ret; } + /* + * On some chips, there are more channels than setups, so if there were + * more unique setups requested than the number of available slots, + * ad7173_set_channel() will have written over some of the slots. We + * can detect this by making sure each assigned cfg_slot matches the + * requested configuration. If it doesn't, we know that the slot was + * overwritten by a different channel. + */ + for_each_set_bit(i, scan_mask, indio_dev->num_channels) { + const struct ad7173_channel_config *cfg1, *cfg2; + + cfg1 = &st->channels[i].cfg; + + for_each_set_bit(j, scan_mask, indio_dev->num_channels) { + cfg2 = &st->channels[j].cfg; + + /* + * Only compare configs that are assigned to the same + * SETUP_SEL slot and don't compare channel to itself. + */ + if (i == j || cfg1->cfg_slot != cfg2->cfg_slot) + continue; + + /* + * If we find two different configs trying to use the + * same SETUP_SEL slot, then we know that we + * have too many unique configurations requested for + * the available slots and at least one was overwritten. + */ + if (!ad7173_is_setup_equal(cfg1, cfg2)) { + /* + * At this point, there isn't a way to tell + * which setups are actually programmed in the + * ADC anymore, so we could read them back to + * see, but it is simpler to just turn off all + * of the live flags so that everything gets + * reprogrammed on the next attempt to read a sample.
+ */ + for (k = 0; k < st->num_channels; k++) + st->channels[k].cfg.live = false; + + dev_err(&st->sd.spi->dev, + "Too many unique channel configurations requested for scan\n"); + return -EINVAL; + } + } + } + return 0; } diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index 6f7034b6c266..fa251dc1aae6 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -873,6 +873,7 @@ static const struct ad7380_chip_info adaq4381_4_chip_info = { .has_hardware_gain = true, .available_scan_masks = ad7380_4_channel_scan_masks, .timing_specs = &ad7380_4_timing, + .max_conversion_rate_hz = 4 * MEGA, }; static const struct spi_offload_config ad7380_offload_config = { diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c index 9674d48074c9..cadb0446bc29 100644 --- a/drivers/iio/adc/rzg2l_adc.c +++ b/drivers/iio/adc/rzg2l_adc.c @@ -89,7 +89,6 @@ struct rzg2l_adc { struct completion completion; struct mutex lock; u16 last_val[RZG2L_ADC_MAX_CHANNELS]; - bool was_rpm_active; }; /** @@ -428,6 +427,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; + platform_set_drvdata(pdev, indio_dev); + adc = iio_priv(indio_dev); adc->hw_params = device_get_match_data(dev); @@ -460,8 +461,6 @@ static int rzg2l_adc_probe(struct platform_device *pdev) if (ret) return ret; - platform_set_drvdata(pdev, indio_dev); - ret = rzg2l_adc_hw_init(dev, adc); if (ret) return dev_err_probe(&pdev->dev, ret, @@ -541,14 +540,9 @@ static int rzg2l_adc_suspend(struct device *dev) }; int ret; - if (pm_runtime_suspended(dev)) { - adc->was_rpm_active = false; - } else { - ret = pm_runtime_force_suspend(dev); - if (ret) - return ret; - adc->was_rpm_active = true; - } + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets); if (ret) @@ -557,9 +551,7 @@ static int rzg2l_adc_suspend(struct device *dev) return 0; rpm_restore: - if (adc->was_rpm_active) - pm_runtime_force_resume(dev); - + pm_runtime_force_resume(dev); return ret; } @@ -577,11 +569,9 @@ static int rzg2l_adc_resume(struct device *dev) if (ret) return ret; - if (adc->was_rpm_active) { - ret = pm_runtime_force_resume(dev); - if (ret) - goto resets_restore; - } + ret = pm_runtime_force_resume(dev); + if (ret) + goto resets_restore; ret = rzg2l_adc_hw_init(dev, adc); if (ret) @@ -590,10 +580,7 @@ static int rzg2l_adc_resume(struct device *dev) return 0; rpm_restore: - if (adc->was_rpm_active) { - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); - } + pm_runtime_force_suspend(dev); resets_restore: reset_control_bulk_assert(ARRAY_SIZE(resets), resets); return ret; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c index 8b15afca498c..271a4788604a 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c @@ -32,8 +32,12 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp) goto exit; *temp = (s16)be16_to_cpup(raw); + /* + * Temperature data is invalid if both accel and gyro are off. + * Return -EBUSY in this case. 
+ */ if (*temp == INV_ICM42600_DATA_INVALID) - ret = -EINVAL; + ret = -EBUSY; exit: mutex_unlock(&st->lock); diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c index 68f60dc3c79d..32719f584c47 100644 --- a/drivers/iio/light/as73211.c +++ b/drivers/iio/light/as73211.c @@ -639,7 +639,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p) struct { __le16 chan[4]; aligned_s64 ts; - } scan; + } scan = { }; int data_result, ret; mutex_lock(&data->mutex); diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 74505c9ec1a0..6cdc8ed53520 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -3213,11 +3213,12 @@ int bmp280_common_probe(struct device *dev, /* Bring chip out of reset if there is an assigned GPIO line */ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpiod)) + return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n"); + /* Deassert the signal */ - if (gpiod) { - dev_info(dev, "release reset\n"); - gpiod_set_value(gpiod, 0); - } + dev_info(dev, "release reset\n"); + gpiod_set_value(gpiod, 0); data->regmap = regmap; diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c index d1510fe24050..f69db6f2f380 100644 --- a/drivers/iio/proximity/isl29501.c +++ b/drivers/iio/proximity/isl29501.c @@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct isl29501_private *isl29501 = iio_priv(indio_dev); const unsigned long *active_mask = indio_dev->active_scan_mask; - u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */ - - if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) - isl29501_register_read(isl29501, REG_DISTANCE, buffer); + u32 value; + struct { + u16 data; + aligned_s64 ts; + } scan = { }; + + if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) { + isl29501_register_read(isl29501, REG_DISTANCE, &value); + scan.data = value; + } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index cae8e84821d7..205939680fd4 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/err.h> #include <linux/spi/spi.h> +#include <linux/types.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/trigger.h> @@ -121,8 +122,15 @@ struct maxim_thermocouple_data { struct spi_device *spi; const struct maxim_thermocouple_chip *chip; char tc_type; - - u8 buffer[16] __aligned(IIO_DMA_MINALIGN); + /* Buffer for reading up to 2 hardware channels. 
*/ + struct { + union { + __be16 raw16; + __be32 raw32; + __be16 raw[2]; + }; + aligned_s64 timestamp; + } buffer __aligned(IIO_DMA_MINALIGN); }; static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, @@ -130,18 +138,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, { unsigned int storage_bytes = data->chip->read_size; unsigned int shift = chan->scan_type.shift + (chan->address * 8); - __be16 buf16; - __be32 buf32; int ret; switch (storage_bytes) { case 2: - ret = spi_read(data->spi, (void *)&buf16, storage_bytes); - *val = be16_to_cpu(buf16); + ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes); + *val = be16_to_cpu(data->buffer.raw16); break; case 4: - ret = spi_read(data->spi, (void *)&buf32, storage_bytes); - *val = be32_to_cpu(buf32); + ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes); + *val = be32_to_cpu(data->buffer.raw32); break; default: ret = -EINVAL; @@ -166,9 +172,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private) struct maxim_thermocouple_data *data = iio_priv(indio_dev); int ret; - ret = spi_read(data->spi, data->buffer, data->chip->read_size); + ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size); if (!ret) { - iio_push_to_buffers_with_ts(indio_dev, data->buffer, + iio_push_to_buffers_with_ts(indio_dev, &data->buffer, sizeof(data->buffer), iio_get_time_ns(indio_dev)); } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index b1c44ec1a3f3..572a91a62a7b 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp, out_free_map: if (ib_uses_virt_dma(dev)) - kfree(map->pfn_list); + kvfree(map->pfn_list); else hmm_dma_map_free(dev->dma_device, map); return ret; @@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp) mutex_unlock(&umem_odp->umem_mutex); mmu_interval_notifier_remove(&umem_odp->notifier); if (ib_uses_virt_dma(dev)) - kfree(umem_odp->map.pfn_list); + kvfree(umem_odp->map.pfn_list); else hmm_dma_map_free(dev->dma_device, &umem_odp->map); } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 37c2bc3bdba5..260dc67b8b87 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1921,7 +1921,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); struct bnxt_re_dev *rdev = srq->rdev; - int rc; switch (srq_attr_mask) { case IB_SRQ_MAX_WR: @@ -1933,11 +1932,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, return -EINVAL; srq->qplib_srq.threshold = srq_attr->srq_limit; - rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); - if (rc) { - ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); - return rc; - } + bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold); + /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 293b0a96c8e3..df7cf8d68e27 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -2017,6 +2017,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev) rdev->nqr = NULL; } +/* When DEL_GID fails, driver is not freeing 
GID ctx memory. + * To avoid the memory leak, free the memory during unload + */ +static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev) +{ + struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_re_gid_ctx *ctx, **ctx_tbl; + int i; + + if (!sgid_tbl->active) + return; + + ctx_tbl = sgid_tbl->ctx; + for (i = 0; i < sgid_tbl->max; i++) { + if (sgid_tbl->hw_id[i] == 0xFFFF) + continue; + + ctx = ctx_tbl[i]; + kfree(ctx); + } +} + static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) { u8 type; @@ -2030,6 +2052,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) cancel_delayed_work_sync(&rdev->worker); + bnxt_re_free_gid_ctx(rdev); if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags)) bnxt_re_cleanup_res(rdev); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index dfe3177123e5..ee36b3d82cc0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -705,9 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, srq->dbinfo.db = srq->dpi->dbr; srq->dbinfo.max_slot = 1; srq->dbinfo.priv_db = res->dpi_tbl.priv_db; - if (srq->threshold) - bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); - srq->arm_req = false; + bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA); return 0; fail: @@ -717,24 +715,6 @@ fail: return rc; } -int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, - struct bnxt_qplib_srq *srq) -{ - struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; - u32 count; - - count = __bnxt_qplib_get_avail(srq_hwq); - if (count > srq->threshold) { - srq->arm_req = false; - bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); - } else { - /* Deferred arming */ - srq->arm_req = true; - } - - return 0; -} - int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) { @@ -776,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, struct bnxt_qplib_hwq *srq_hwq = &srq->hwq; struct rq_wqe *srqe; struct sq_sge *hw_sge; - u32 count = 0; int i, next; spin_lock(&srq_hwq->lock); @@ -808,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot); - spin_lock(&srq_hwq->lock); - count = __bnxt_qplib_get_avail(srq_hwq); - spin_unlock(&srq_hwq->lock); /* Ring DB */ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ); - if (srq->arm_req == true && count > srq->threshold) { - srq->arm_req = false; - bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold); - } return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index ab125f1d949e..4921a214c34c 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -546,8 +546,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, srqn_handler_t srq_handler); int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); -int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, - struct bnxt_qplib_srq *srq); int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq); void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 6cd05207ffed..cc5c82d96839 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ 
b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, pbl->pg_arr = vmalloc_array(pages, sizeof(void *)); if (!pbl->pg_arr) return -ENOMEM; + memset(pbl->pg_arr, 0, pages * sizeof(void *)); pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t)); if (!pbl->pg_map_arr) { @@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res, pbl->pg_arr = NULL; return -ENOMEM; } + memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t)); pbl->pg_count = 0; pbl->pg_size = sginfo->pgsize; diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index 94c211df09d8..fdeec33c71da 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -994,6 +994,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL); if (xa_is_err(old_entry)) ret = xa_err(old_entry); + else + qp->ibqp.qp_num = 1; } else { ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp, XA_LIMIT(1, dev->attrs.max_qp - 1), @@ -1031,7 +1033,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (ret) goto err_out_cmd; } else { - init_kernel_qp(dev, qp, attrs); + ret = init_kernel_qp(dev, qp, attrs); + if (ret) + goto err_out_xa; } qp->attrs.max_send_sge = attrs->cap.max_send_sge; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 64bca08f3f1a..f82bdd46a917 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) if (!hr_dev->is_vf) hns_roce_free_link_table(hr_dev); - if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09) + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) free_dip_entry(hr_dev); } @@ -5476,7 +5476,7 @@ out: return ret; } -static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn, +static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn, void *buffer) { struct hns_roce_v2_scc_context *context; @@ -5488,7 +5488,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn, return PTR_ERR(mailbox); ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC, - qpn); + sccn); if (ret) goto out; diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index f637b73b946e..230187dda6a0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp) struct hns_roce_v2_qp_context qpc; struct hns_roce_v2_scc_context sccc; } context = {}; + u32 sccn = hr_qp->qpn; int ret; if (!hr_dev->hw->query_qpc) @@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp) !hr_dev->hw->query_sccc) goto out; - ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc); + if (hr_qp->cong_type == CONG_TYPE_DIP) { + if (!hr_qp->dip) + goto out; + sccn = hr_qp->dip->dip_idx; + } + + ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc); if (ret) ibdev_warn_ratelimited(&hr_dev->ib_dev, "failed to query SCCC, ret = %d.\n", diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 132a87e52d5c..ac0183a2ff7a 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c 
@@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, static void rxe_skb_tx_dtor(struct sk_buff *skb) { - struct net_device *ndev = skb->dev; - struct rxe_dev *rxe; - unsigned int qp_index; - struct rxe_qp *qp; + struct rxe_qp *qp = skb->sk->sk_user_data; int skb_out; - rxe = rxe_get_dev_from_net(ndev); - if (!rxe && is_vlan_dev(ndev)) - rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev)); - if (WARN_ON(!rxe)) - return; - - qp_index = (int)(uintptr_t)skb->sk->sk_user_data; - if (!qp_index) - return; - - qp = rxe_pool_get_index(&rxe->qp_pool, qp_index); - if (!qp) - goto put_dev; - skb_out = atomic_dec_return(&qp->skb_out); - if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW) + if (unlikely(qp->need_req_skb && + skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) rxe_sched_task(&qp->send_task); rxe_put(qp); -put_dev: - ib_device_put(&rxe->ib_dev); sock_put(skb->sk); } @@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; + rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) @@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; + rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) @@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, goto out; } + /* Add time stamp to skb. */ + skb->tstamp = ktime_get(); + skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev)); /* FIXME: hold reference to this netdev until life of this skb. */ diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index f2af3e0aef35..95f1c1c2949d 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk); if (err < 0) return err; - qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index; + qp->sk->sk->sk_user_data = qp; /* pick a source UDP port number for this QP based on * the source QPN. 
this spreads traffic for different QPs diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 7b5af6176de9..8de689b2c5ed 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3638,7 +3638,7 @@ static int __init parse_ivrs_acpihid(char *str) { u32 seg = 0, bus, dev, fn; char *hid, *uid, *p, *addr; - char acpiid[ACPIID_LEN] = {0}; + char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */ int i; addr = strchr(str, '@'); @@ -3664,7 +3664,7 @@ static int __init parse_ivrs_acpihid(char *str) /* We have the '@', make it the terminator to get just the acpiid */ *addr++ = 0; - if (strlen(str) > ACPIID_LEN + 1) + if (strlen(str) > ACPIID_LEN) goto not_found; if (sscanf(str, "=%s", acpiid) != 1) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 5968043ac802..2a8b46b948f0 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2997,9 +2997,9 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state) /* ATS is being switched off, invalidate the entire ATC */ arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); } - master->ats_enabled = state->ats_enabled; arm_smmu_remove_master_domain(master, state->old_domain, state->ssid); + master->ats_enabled = state->ats_enabled; } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index be1aaaf8cd17..378104cd395e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -301,9 +301,11 @@ static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf) struct iommu_vevent_tegra241_cmdqv vevent_data; int i; - for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) - vevent_data.lvcmdq_err_map[i] = - readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i))); + for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) { + u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i))); + + vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err); + } iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV, &vevent_data, sizeof(vevent_data)); diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c index 2ca5809b238b..462b457ffd0c 100644 --- a/drivers/iommu/iommufd/viommu.c +++ b/drivers/iommu/iommufd/viommu.c @@ -339,7 +339,7 @@ iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd, } *base_pa = (page_to_pfn(pages[0]) << PAGE_SHIFT) + offset; - kfree(pages); + kvfree(pages); return access; out_unpin: @@ -349,7 +349,7 @@ out_detach: out_destroy: iommufd_access_destroy_internal(viommu->ictx, access); out_free: - kfree(pages); + kvfree(pages); return ERR_PTR(rc); } diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c index 2d0d31ba2886..0eae2f4bdc5e 100644 --- a/drivers/iommu/riscv/iommu.c +++ b/drivers/iommu/riscv/iommu.c @@ -1283,7 +1283,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain, unsigned long *ptr; ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); - if (_io_pte_none(*ptr) || !_io_pte_present(*ptr)) + if (!ptr) return 0; return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1)); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 532db1de201b..b39d6f134ab2 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -998,8 +998,7 @@ static void viommu_get_resv_regions(struct device 
*dev, struct list_head *head) iommu_dma_get_resv_regions(dev, head); } -static const struct iommu_ops viommu_ops; -static struct virtio_driver virtio_iommu_drv; +static const struct bus_type *virtio_bus_type; static int viommu_match_node(struct device *dev, const void *data) { @@ -1008,8 +1007,9 @@ static int viommu_match_node(struct device *dev, const void *data) static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode) { - struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL, - fwnode, viommu_match_node); + struct device *dev = bus_find_device(virtio_bus_type, NULL, fwnode, + viommu_match_node); + put_device(dev); return dev ? dev_to_virtio(dev)->priv : NULL; @@ -1160,6 +1160,9 @@ static int viommu_probe(struct virtio_device *vdev) if (!viommu) return -ENOMEM; + /* Borrow this for easy lookups later */ + virtio_bus_type = dev->bus; + spin_lock_init(&viommu->request_lock); ida_init(&viommu->domain_ids); viommu->dev = dev; @@ -1229,10 +1232,10 @@ static int viommu_probe(struct virtio_device *vdev) if (ret) goto err_free_vqs; - iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); - vdev->priv = viommu; + iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev); + dev_info(dev, "input address: %u bits\n", order_base_2(viommu->geometry.aperture_end)); dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); diff --git a/drivers/md/md.c b/drivers/md/md.c index ac85ec73a409..1baaf52c603c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -339,6 +339,7 @@ static int start_readonly; * so all the races disappear. */ static bool create_on_open = true; +static bool legacy_async_del_gendisk = true; /* * We have a system wide 'event count' that is incremented @@ -877,15 +878,18 @@ void mddev_unlock(struct mddev *mddev) export_rdev(rdev, mddev); } - /* Call del_gendisk after release reconfig_mutex to avoid - * deadlock (e.g. call del_gendisk under the lock and an - * access to sysfs files waits the lock) - * And MD_DELETED is only used for md raid which is set in - * do_md_stop. dm raid only uses md_stop to stop. So dm raid - * doesn't need to check MD_DELETED when getting reconfig lock - */ - if (test_bit(MD_DELETED, &mddev->flags)) - del_gendisk(mddev->gendisk); + if (!legacy_async_del_gendisk) { + /* + * Call del_gendisk after release reconfig_mutex to avoid + * deadlock (e.g. call del_gendisk under the lock and an + * access to sysfs files waits the lock) + * And MD_DELETED is only used for md raid which is set in + * do_md_stop. dm raid only uses md_stop to stop. 
So dm raid + * doesn't need to check MD_DELETED when getting reconfig lock + */ + if (test_bit(MD_DELETED, &mddev->flags)) + del_gendisk(mddev->gendisk); + } } EXPORT_SYMBOL_GPL(mddev_unlock); @@ -1419,7 +1423,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru else { if (sb->events_hi == sb->cp_events_hi && sb->events_lo == sb->cp_events_lo) { - mddev->resync_offset = sb->resync_offset; + mddev->resync_offset = sb->recovery_cp; } else mddev->resync_offset = 0; } @@ -1547,13 +1551,13 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) mddev->minor_version = sb->minor_version; if (mddev->in_sync) { - sb->resync_offset = mddev->resync_offset; + sb->recovery_cp = mddev->resync_offset; sb->cp_events_hi = (mddev->events>>32); sb->cp_events_lo = (u32)mddev->events; if (mddev->resync_offset == MaxSector) sb->state = (1<< MD_SB_CLEAN); } else - sb->resync_offset = 0; + sb->recovery_cp = 0; sb->layout = mddev->layout; sb->chunk_size = mddev->chunk_sectors << 9; @@ -4835,9 +4839,42 @@ out_unlock: static struct md_sysfs_entry md_metadata = __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); +static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors) +{ + return rdev->raid_disk >= 0 && + !test_bit(Journal, &rdev->flags) && + !test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags) && + rdev->recovery_offset < sectors; +} + +static enum sync_action md_get_active_sync_action(struct mddev *mddev) +{ + struct md_rdev *rdev; + bool is_recover = false; + + if (mddev->resync_offset < MaxSector) + return ACTION_RESYNC; + + if (mddev->reshape_position != MaxSector) + return ACTION_RESHAPE; + + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) { + if (rdev_needs_recovery(rdev, MaxSector)) { + is_recover = true; + break; + } + } + rcu_read_unlock(); + + return is_recover ? ACTION_RECOVER : ACTION_IDLE; +} + enum sync_action md_sync_action(struct mddev *mddev) { unsigned long recovery = mddev->recovery; + enum sync_action active_action; /* * frozen has the highest priority, means running sync_thread will be @@ -4861,8 +4898,17 @@ enum sync_action md_sync_action(struct mddev *mddev) !test_bit(MD_RECOVERY_NEEDED, &recovery)) return ACTION_IDLE; - if (test_bit(MD_RECOVERY_RESHAPE, &recovery) || - mddev->reshape_position != MaxSector) + /* + * Check if any sync operation (resync/recover/reshape) is + * currently active. This ensures that only one sync operation + * can run at a time. Returns the type of active operation, or + * ACTION_IDLE if none are active. 
+ */ + active_action = md_get_active_sync_action(mddev); + if (active_action != ACTION_IDLE) + return active_action; + + if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) return ACTION_RESHAPE; if (test_bit(MD_RECOVERY_RECOVER, &recovery)) @@ -5818,6 +5864,13 @@ static void md_kobj_release(struct kobject *ko) { struct mddev *mddev = container_of(ko, struct mddev, kobj); + if (legacy_async_del_gendisk) { + if (mddev->sysfs_state) + sysfs_put(mddev->sysfs_state); + if (mddev->sysfs_level) + sysfs_put(mddev->sysfs_level); + del_gendisk(mddev->gendisk); + } put_disk(mddev->gendisk); } @@ -6021,6 +6074,9 @@ static int md_alloc_and_put(dev_t dev, char *name) { struct mddev *mddev = md_alloc(dev, name); + if (legacy_async_del_gendisk) + pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n"); + if (IS_ERR(mddev)) return PTR_ERR(mddev); mddev_put(mddev); @@ -6431,10 +6487,22 @@ static void md_clean(struct mddev *mddev) mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; - /* if UNTIL_STOP is set, it's cleared here */ - mddev->hold_active = 0; - /* Don't clear MD_CLOSING, or mddev can be opened again. */ - mddev->flags &= BIT_ULL_MASK(MD_CLOSING); + + /* + * In legacy_async_del_gendisk mode, the array can be stopped in the + * middle of assembling it and still needs to be accessible + * afterwards, so MD_CLOSING must be cleared. Without + * legacy_async_del_gendisk, the array can't be opened again after + * stopping it, so MD_CLOSING is not cleared. + */ + if (legacy_async_del_gendisk && mddev->hold_active) { + clear_bit(MD_CLOSING, &mddev->flags); + } else { + /* if UNTIL_STOP is set, it's cleared here */ + mddev->hold_active = 0; + /* Don't clear MD_CLOSING, or mddev can be opened again. */ + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); + } mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; @@ -6658,7 +6726,8 @@ static int do_md_stop(struct mddev *mddev, int mode) export_array(mddev); md_clean(mddev); - set_bit(MD_DELETED, &mddev->flags); + if (!legacy_async_del_gendisk) + set_bit(MD_DELETED, &mddev->flags); } md_new_event(); sysfs_notify_dirent_safe(mddev->sysfs_state); @@ -8968,11 +9037,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action) start = MaxSector; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) - if (rdev->raid_disk >= 0 && - !test_bit(Journal, &rdev->flags) && - !test_bit(Faulty, &rdev->flags) && - !test_bit(In_sync, &rdev->flags) && - rdev->recovery_offset < start) + if (rdev_needs_recovery(rdev, start)) start = rdev->recovery_offset; rcu_read_unlock(); @@ -9331,12 +9396,8 @@ void md_do_sync(struct md_thread *thread) test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) - if (rdev->raid_disk >= 0 && - mddev->delta_disks >= 0 && - !test_bit(Journal, &rdev->flags) && - !test_bit(Faulty, &rdev->flags) && - !test_bit(In_sync, &rdev->flags) && - rdev->recovery_offset < mddev->curr_resync) + if (mddev->delta_disks >= 0 && + rdev_needs_recovery(rdev, mddev->curr_resync)) rdev->recovery_offset = mddev->curr_resync; rcu_read_unlock(); } @@ -10392,6 +10453,7 @@ module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); module_param(create_on_open, bool, S_IRUSR|S_IWUSR); +module_param(legacy_async_del_gendisk, bool, 0600); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD RAID framework"); diff --git
a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 7f3f47db4c98..e4275f8ee5db 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -555,7 +555,6 @@ EXPORT_SYMBOL(memstick_add_host); */ void memstick_remove_host(struct memstick_host *host) { - host->removing = 1; flush_workqueue(workqueue); mutex_lock(&host->lock); if (host->card) diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 3878136227e4..5b5e9354fb2e 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -812,6 +812,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev) int err; host->eject = true; + msh->removing = true; cancel_work_sync(&host->handle_req); cancel_delayed_work_sync(&host->poll_card); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 42878474e56e..60dbc815e501 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -99,6 +99,9 @@ #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16)) +#define CD_STABLE_TIMEOUT_US 1000000 +#define CD_STABLE_MAX_SLEEP_US 10 + /** * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map * @@ -206,12 +209,15 @@ struct sdhci_arasan_data { * 19MHz instead */ #define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2) +/* Enable CD stable check before power-up */ +#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE BIT(3) }; struct sdhci_arasan_of_data { const struct sdhci_arasan_soc_ctl_map *soc_ctl_map; const struct sdhci_pltfm_data *pdata; const struct sdhci_arasan_clk_ops *clk_ops; + u32 quirks; }; static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = { @@ -514,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc, return -EINVAL; } +static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + /* + * Ensure that the card detect logic has stabilized before powering up; this is + * necessary after a host controller reset.
+ */ + if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE) + read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US, + CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE); + + sdhci_set_power_and_bus_voltage(host, mode, vdd); +} + static const struct sdhci_ops sdhci_arasan_ops = { .set_clock = sdhci_arasan_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, @@ -521,7 +545,7 @@ static const struct sdhci_ops sdhci_arasan_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_arasan_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .set_power = sdhci_set_power_and_bus_voltage, + .set_power = sdhci_arasan_set_power_and_bus_voltage, .hw_reset = sdhci_arasan_hw_reset, }; @@ -570,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_arasan_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .set_power = sdhci_set_power_and_bus_voltage, + .set_power = sdhci_arasan_set_power_and_bus_voltage, .irq = sdhci_arasan_cqhci_irq, }; @@ -1447,6 +1471,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = { static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = { .pdata = &sdhci_arasan_zynqmp_pdata, .clk_ops = &zynqmp_clk_ops, + .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE, }; static const struct sdhci_arasan_clk_ops versal_clk_ops = { @@ -1457,6 +1482,7 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = { static struct sdhci_arasan_of_data sdhci_arasan_versal_data = { .pdata = &sdhci_arasan_zynqmp_pdata, .clk_ops = &versal_clk_ops, + .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE, }; static const struct sdhci_arasan_clk_ops versal_net_clk_ops = { @@ -1467,6 +1493,7 @@ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = { static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = { .pdata = &sdhci_arasan_versal_net_pdata, .clk_ops = &versal_net_clk_ops, + .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE, }; static struct sdhci_arasan_of_data intel_keembay_emmc_data = { @@ -1937,6 +1964,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1")) sdhci_arasan_update_clockmultiplier(host, 0x0); + sdhci_arasan->quirks |= data->quirks; + if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") || of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") || of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) { diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 4c2ae71770f7..3a1de477e9af 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -287,6 +287,20 @@ #define GLI_MAX_TUNING_LOOP 40 /* Genesys Logic chipset */ +static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev) +{ + int aer; + u32 value; + + /* mask the replay timer timeout of AER */ + aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (aer) { + pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); + value |= PCI_ERR_COR_REP_TIMER; + pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); + } +} + static inline void gl9750_wt_on(struct sdhci_host *host) { u32 wt_value; @@ -607,7 +621,6 @@ static void gl9750_hw_setting(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); struct pci_dev *pdev; - int aer; u32 value; pdev = slot->chip->pdev; @@ -626,12 +639,7 @@ static void gl9750_hw_setting(struct sdhci_host *host) pci_set_power_state(pdev, PCI_D0); /* mask 
the replay timer timeout of AER */ - aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (aer) { - pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); - value |= PCI_ERR_COR_REP_TIMER; - pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); - } + sdhci_gli_mask_replay_timer_timeout(pdev); gl9750_wt_off(host); } @@ -806,7 +814,6 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock) static void gl9755_hw_setting(struct sdhci_pci_slot *slot) { struct pci_dev *pdev = slot->chip->pdev; - int aer; u32 value; gl9755_wt_on(pdev); @@ -841,12 +848,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot) pci_set_power_state(pdev, PCI_D0); /* mask the replay timer timeout of AER */ - aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (aer) { - pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value); - value |= PCI_ERR_COR_REP_TIMER; - pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value); - } + sdhci_gli_mask_replay_timer_timeout(pdev); gl9755_wt_off(pdev); } @@ -1751,7 +1753,7 @@ cleanup: return ret; } -static void gli_set_gl9763e(struct sdhci_pci_slot *slot) +static void gl9763e_hw_setting(struct sdhci_pci_slot *slot) { struct pci_dev *pdev = slot->chip->pdev; u32 value; @@ -1780,6 +1782,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot) value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5); pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value); + /* mask the replay timer timeout of AER */ + sdhci_gli_mask_replay_timer_timeout(pdev); + pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value); value &= ~GLI_9763E_VHS_REV; value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R); @@ -1923,7 +1928,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot) gli_pcie_enable_msi(slot); host->mmc_host_ops.hs400_enhanced_strobe = gl9763e_hs400_enhanced_strobe; - gli_set_gl9763e(slot); + gl9763e_hw_setting(slot); sdhci_enable_v4_mode(host); return 0; diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index e4fc345be7e5..17e62c61b6e6 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -156,6 +156,7 @@ struct sdhci_am654_data { #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) #define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1) +#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2) }; struct window { @@ -765,6 +766,7 @@ static int sdhci_am654_init(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + struct device *dev = mmc_dev(host->mmc); u32 ctl_cfg_2 = 0; u32 mask; u32 val; @@ -820,6 +822,12 @@ static int sdhci_am654_init(struct sdhci_host *host) if (ret) goto err_cleanup_host; + if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 && + host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) { + dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n"); + host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); + } + ret = __sdhci_add_host(host); if (ret) goto err_cleanup_host; @@ -883,6 +891,12 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev, return 0; } +static const struct soc_device_attribute sdhci_am654_descope_hs400[] = { + { .family = "AM62PX", .revision = "SR1.0" }, + { .family = "AM62PX", .revision = "SR1.1" }, + { /* sentinel */ } +}; + static const struct of_device_id sdhci_am654_of_match[] = { { .compatible = "ti,am654-sdhci-5.1", @@ -970,6 +984,10 @@ static int 
sdhci_am654_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "parsing dt failed\n"); + soc = soc_device_match(sdhci_am654_descope_hs400); + if (soc) + sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400; + host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch; host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; diff --git a/drivers/most/core.c b/drivers/most/core.c index a635d5082ebb..da319d108ea1 100644 --- a/drivers/most/core.c +++ b/drivers/most/core.c @@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch) dev = bus_find_device_by_name(&mostbus, NULL, mdev); if (!dev) return NULL; - put_device(dev); iface = dev_get_drvdata(dev); + put_device(dev); list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { if (!strcmp(dev_name(&c->dev), mdev_ch)) return c; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 2fca8e84ab10..4edc8e6b6b64 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker); static void ad_mux_machine(struct port *port, bool *update_slave_arr); static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); static void ad_tx_machine(struct port *port); -static void ad_periodic_machine(struct port *port, struct bond_params *bond_params); +static void ad_periodic_machine(struct port *port); static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); static void ad_agg_selection_logic(struct aggregator *aggregator, bool *update_slave_arr); static void ad_clear_agg(struct aggregator *aggregator); static void ad_initialize_agg(struct aggregator *aggregator); -static void ad_initialize_port(struct port *port, int lacp_fast); +static void ad_initialize_port(struct port *port, const struct bond_params *bond_params); static void ad_enable_collecting(struct port *port); static void ad_disable_distributing(struct port *port, bool *update_slave_arr); @@ -1307,10 +1307,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) * case of EXPIRED even if LINK_DOWN didn't arrive for * the port. */ - port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; port->sm_vars &= ~AD_PORT_MATCHED; + /* Based on IEEE 802.1AX-2014, Figure 6-18 - Receive + * machine state diagram, the state should be + * Partner_Oper_Port_State.Synchronization = FALSE; + * Partner_Oper_Port_State.LACP_Timeout = Short Timeout; + * start current_while_timer(Short Timeout); + * Actor_Oper_Port_State.Expired = TRUE; + */ + port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION; port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT; - port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY; port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); port->actor_oper_port_state |= LACP_STATE_EXPIRED; port->sm_vars |= AD_PORT_CHURNED; @@ -1417,11 +1423,10 @@ static void ad_tx_machine(struct port *port) /** * ad_periodic_machine - handle a port's periodic state machine * @port: the port we're looking at - * @bond_params: bond parameters we will use * * Turn ntt flag on periodically to perform periodic transmission of LACPDUs.
*/ -static void ad_periodic_machine(struct port *port, struct bond_params *bond_params) +static void ad_periodic_machine(struct port *port) { periodic_states_t last_state; @@ -1430,8 +1435,7 @@ static void ad_periodic_machine(struct port *port, struct bond_para /* check if port was reinitialized */ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) || - (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) || - !bond_params->lacp_active) { + (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) { port->sm_periodic_state = AD_NO_PERIODIC; } /* check if state machine should change state */ @@ -1955,16 +1959,16 @@ static void ad_initialize_agg(struct aggregator *aggregator) /** * ad_initialize_port - initialize a given port's parameters * @port: the port we're looking at - * @lacp_fast: boolean. whether fast periodic should be used + * @bond_params: bond parameters we will use */ -static void ad_initialize_port(struct port *port, int lacp_fast) +static void ad_initialize_port(struct port *port, const struct bond_params *bond_params) { static const struct port_params tmpl = { .system_priority = 0xffff, .key = 1, .port_number = 1, .port_priority = 0xff, - .port_state = 1, + .port_state = 0, }; static const struct lacpdu lacpdu = { .subtype = 0x01, @@ -1982,12 +1986,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast) port->actor_port_priority = 0xff; port->actor_port_aggregator_identifier = 0; port->ntt = false; - port->actor_admin_port_state = LACP_STATE_AGGREGATION | - LACP_STATE_LACP_ACTIVITY; - port->actor_oper_port_state = LACP_STATE_AGGREGATION | - LACP_STATE_LACP_ACTIVITY; + port->actor_admin_port_state = LACP_STATE_AGGREGATION; + port->actor_oper_port_state = LACP_STATE_AGGREGATION; + if (bond_params->lacp_active) { + port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY; + port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; + } - if (lacp_fast) + if (bond_params->lacp_fast) port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT; memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); @@ -2201,7 +2207,7 @@ void bond_3ad_bind_slave(struct slave *slave) /* port initialization */ port = &(SLAVE_AD_INFO(slave)->port); - ad_initialize_port(port, bond->params.lacp_fast); + ad_initialize_port(port, &bond->params); port->slave = slave; port->actor_port_number = SLAVE_AD_INFO(slave)->id; @@ -2513,7 +2519,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } ad_rx_machine(NULL, port); - ad_periodic_machine(port, &bond->params); + ad_periodic_machine(port); ad_port_selection_logic(port, &update_slave_arr); ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); @@ -2883,6 +2889,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) spin_unlock_bh(&bond->mode_lock); } +/** + * bond_3ad_update_lacp_active - update the lacp_active state + * @bond: bonding struct + * + * Update actor_oper_port_state when lacp_active is modified.
+ */ +void bond_3ad_update_lacp_active(struct bonding *bond) +{ + struct port *port = NULL; + struct list_head *iter; + struct slave *slave; + int lacp_active; + + lacp_active = bond->params.lacp_active; + spin_lock_bh(&bond->mode_lock); + bond_for_each_slave(bond, slave, iter) { + port = &(SLAVE_AD_INFO(slave)->port); + if (lacp_active) + port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY; + else + port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY; + } + spin_unlock_bh(&bond->mode_lock); +} + size_t bond_3ad_stats_size(void) { return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */ diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 1d639a3be6ba..3b6f815c55ff 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1660,6 +1660,7 @@ static int bond_option_lacp_active_set(struct bonding *bond, netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n", newval->string, newval->value); bond->params.lacp_active = newval->value; + bond_3ad_update_lacp_active(bond); return 0; } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 9942fb6f7f4b..829b1f087e9e 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -2078,7 +2078,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, /* Start search operation */ reg = ARL_SRCH_STDN; - b53_write8(priv, offset, B53_ARL_SRCH_CTL, reg); + b53_write8(priv, B53_ARLIO_PAGE, offset, reg); do { ret = b53_arl_search_wait(priv); diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c index 76e490070e9c..c354abdafc1b 100644 --- a/drivers/net/dsa/microchip/ksz8.c +++ b/drivers/net/dsa/microchip/ksz8.c @@ -36,15 +36,14 @@ static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { - regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); + ksz_rmw8(dev, addr, bits, set ? bits : 0); } static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, bool set) { - regmap_update_bits(ksz_regmap_8(dev), - dev->dev_ops->get_port_addr(port, offset), - bits, set ? bits : 0); + ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits, + set ? bits : 0); } /** @@ -1955,16 +1954,19 @@ int ksz8_setup(struct dsa_switch *ds) ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); /* Enable aggressive back off algorithm in half duplex mode. */ - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1, - SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + if (ret) + return ret; /* * Make sure unicast VLAN boundary is set as default and * enable no excessive collision drop. 
*/ - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2, - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + ret = ksz_rmw8(dev, REG_SW_CTRL_2, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + if (ret) + return ret; ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 7292bfe2f7ca..9568cc391fe3 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1447,6 +1447,7 @@ static const struct regmap_range ksz8873_valid_regs[] = { regmap_reg_range(0x3f, 0x3f), /* advanced control registers */ + regmap_reg_range(0x43, 0x43), regmap_reg_range(0x60, 0x6f), regmap_reg_range(0x70, 0x75), regmap_reg_range(0x76, 0x78), @@ -2456,6 +2457,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port) dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); } + /* HSR ports are set up once, so we need to use the assigned membership + * when the port is enabled. + */ + if (!port_member && p->stp_state == BR_STATE_FORWARDING && + (dev->hsr_ports & BIT(port))) + port_member = dev->hsr_ports; dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); } diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index 47411d2cbd28..88694b08afa1 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -736,10 +736,8 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, continue; } - if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) { - e->hash = 0xffff; + if (!airoha_ppe_foe_compare_entry(e, hwe)) continue; - } airoha_ppe_foe_commit_entry(ppe, &e->data, hash); commit_done = true; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5578ddcb465d..207a8bb36ae5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -926,15 +926,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, + unsigned int *offset, gfp_t gfp) { netmem_ref netmem; - netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp); + } else { + netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); + *offset = 0; + } if (!netmem) return 0; - *mapping = page_pool_get_dma_addr_netmem(netmem); + *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset; return netmem; } @@ -1029,7 +1035,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, dma_addr_t mapping; netmem_ref netmem; - netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp); + netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp); if (!netmem) return -ENOMEM; @@ -3819,7 +3825,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, if (BNXT_RX_PAGE_MODE(bp)) pp.pool_size += bp->rx_ring_size / rx_size_fac; pp.nid = numa_node; - pp.napi = &rxr->bnapi->napi; pp.netdev = bp->dev; pp.dev = &bp->pdev->dev; pp.dma_dir = bp->rx_dir; @@ -3851,6 +3856,12 @@ err_destroy_pp: return PTR_ERR(pool); } +static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr) +{ + page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi); +
page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi); +} + static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { u16 mem_size; @@ -3889,6 +3900,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); if (rc) return rc; + bnxt_enable_rx_page_pool(rxr); rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); if (rc < 0) @@ -5320,7 +5332,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) { int i; - netdev_assert_locked(bp->dev); + netdev_assert_locked_or_invisible(bp->dev); /* Under netdev instance lock and all our NAPIs have been disabled. * It's safe to delete the hash table. @@ -16031,6 +16043,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) goto err_reset; } + bnxt_enable_rx_page_pool(rxr); napi_enable_locked(&bnapi->napi); bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ce95fad8cedd..9693f0289435 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5113,7 +5113,8 @@ static const struct macb_config sama7g5_gem_config = { static const struct macb_config sama7g5_emac_config = { .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | - MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP, + MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII | + MACB_CAPS_GEM_HAS_PTP, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 1f411d7c4373..1be1b1ef31ee 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -2870,6 +2870,8 @@ static void gve_shutdown(struct pci_dev *pdev) struct gve_priv *priv = netdev_priv(netdev); bool was_up = netif_running(priv->dev); + netif_device_detach(netdev); + rtnl_lock(); netdev_lock(netdev); if (was_up && gve_close(priv->dev)) { diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 503cfbfb4a8a..83cf75bf7a17 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) { int ret; - ASSERT_RTNL(); + if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return -EBUSY; if (netif_running(priv->netdev)) { + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); dev_warn(&priv->pdev->dev, "failed to reset because port is up\n"); return -EBUSY; @@ -64,7 +66,6 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) netif_device_detach(priv->netdev); priv->reset_type = type; - set_bit(HBG_NIC_STATE_RESETTING, &priv->state); clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); if (ret) { @@ -84,29 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) type != priv->reset_type) return 0; - ASSERT_RTNL(); - - clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); ret = hbg_rebuild(priv); if (ret) { priv->stats.reset_fail_cnt++; set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); dev_err(&priv->pdev->dev, "failed to rebuild after reset\n"); return ret; } netif_device_attach(priv->netdev); + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); dev_info(&priv->pdev->dev, "reset 
done\n"); return ret; } -/* must be protected by rtnl lock */ int hbg_reset(struct hbg_priv *priv) { int ret; - ASSERT_RTNL(); ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION); if (ret) return ret; @@ -171,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct hbg_priv *priv = netdev_priv(netdev); - rtnl_lock(); hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR); } @@ -181,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev) struct hbg_priv *priv = netdev_priv(netdev); hbg_reset_done(priv, HBG_RESET_TYPE_FLR); - rtnl_unlock(); } static const struct pci_error_handlers hbg_pci_err_handler = { diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index 8cca8316ba40..d0aa0661ecd4 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -12,6 +12,8 @@ #define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000) #define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000) +#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000) +#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000) /* little endian or big endian. * ctrl means packet description, data means skb packet data */ @@ -228,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr) void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) { + u32 link_status; + int ret; + hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE); hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR, @@ -239,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); - if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, - HBG_REG_AN_NEG_STATE_NP_LINK_OK_B)) + /* wait MAC link up */ + ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR, + link_status, + FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B, + link_status), + HBG_MAC_LINK_WAIT_INTERVAL_US, + HBG_MAC_LINK_WAIT_TIMEOUT_US); + if (ret) hbg_np_link_fail_task_schedule(priv); } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h index 2883a5899ae2..8b6110599e10 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h @@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir) static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring) { - return (ring->ntu + ring->len - ring->ntc) % ring->len; + u32 len = READ_ONCE(ring->len); + + if (!len) + return 0; + + return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len; } netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 458e5eaa92e5..e79b14d50b24 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7149,6 +7149,13 @@ static int igc_probe(struct pci_dev *pdev, adapter->port_num = hw->bus.func; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ if (igc_is_device_id_i226(hw)) pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); 
@@ -7175,13 +7182,6 @@ static int igc_probe(struct pci_dev *pdev, netdev->mem_start = pci_resource_start(pdev, 0); netdev->mem_end = pci_resource_end(pdev, 0); - /* PCI config space info */ - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - hw->revision_id = pdev->revision; - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - /* Copy the default MAC and PHY function pointers */ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c index 54f1b83dfe42..d227f4d2a2d1 100644 --- a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c @@ -543,6 +543,7 @@ int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter) attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = adapter->hw.bus.func; + attrs.no_phys_port_name = 1; ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id); devlink_port_attrs_set(devlink_port, &attrs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6122a0abb41f..80e6a2ef1350 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -968,10 +968,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->tx_ring[i]->state); - - for (i = 0; i < adapter->num_xdp_queues; i++) - clear_bit(__IXGBE_HANG_CHECK_ARMED, - &adapter->xdp_ring[i]->state); } static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) @@ -1214,7 +1210,7 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring, struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); struct ixgbe_hw *hw = &adapter->hw; - e_err(drv, "Detected Tx Unit Hang%s\n" + e_err(drv, "Detected Tx Unit Hang\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" @@ -1222,16 +1218,14 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring, "tx_buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", - ring_is_xdp(tx_ring) ? 
" (XDP)" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), tx_ring->next_to_use, next, tx_ring->tx_buffer_info[next].time_stamp, jiffies); - if (!ring_is_xdp(tx_ring)) - netif_stop_subqueue(tx_ring->netdev, - tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); } /** @@ -1451,6 +1445,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_bytes); adapter->tx_ipsec += total_ipsec; + if (ring_is_xdp(tx_ring)) + return !!budget; + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { if (adapter->hw.mac.type == ixgbe_mac_e610) ixgbe_handle_mdd_event(adapter, tx_ring); @@ -1468,9 +1465,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, return true; } - if (ring_is_xdp(tx_ring)) - return !!budget; - #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); if (!__netif_txq_completed_wake(txq, total_packets, total_bytes, @@ -7974,12 +7968,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) return; /* Force detection of hung controller */ - if (netif_carrier_ok(adapter->netdev)) { + if (netif_carrier_ok(adapter->netdev)) for (i = 0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); - for (i = 0; i < adapter->num_xdp_queues; i++) - set_check_for_tx_hang(adapter->xdp_ring[i]); - } if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { /* @@ -8199,13 +8190,6 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) return true; } - for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *ring = adapter->xdp_ring[i]; - - if (ring->next_to_use != ring->next_to_clean) - return true; - } - return false; } @@ -11005,6 +10989,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) return -ENETDOWN; + if (!netif_carrier_ok(adapter->netdev) || + !netif_running(adapter->netdev)) + return -ENETDOWN; + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index ac58964b2f08..7b941505a9d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) dma_addr_t dma; u32 cmd_type; - while (budget-- > 0) { + while (likely(budget)) { if (unlikely(!ixgbe_desc_unused(xdp_ring))) { work_done = false; break; @@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) xdp_ring->next_to_use = 0; + + budget--; } if (tx_desc) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 1b765045aa63..b56395ac5a74 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); - /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */ - if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) && + /* Allow extracting SPI field from AH and ESP headers at same offset */ + if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) && 
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH)))) *features |= BIT_ULL(NPC_IPSEC_SPI); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index c855fb799ce1..e9bd32741983 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) return -1; + rcu_read_lock(); err = dev_fill_forward_path(dev, addr, &stack); + rcu_read_unlock(); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h index b59aee75de94..2c98a5299df3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h @@ -26,7 +26,6 @@ struct mlx5e_dcbx { u8 cap; /* Buffer configuration */ - bool manual_buffer; u32 cable_len; u32 xoff; u16 port_buff_cell_sz; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 5ae787656a7c..3efa8bf1d14e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev, /* Total shared buffer size is split in a ratio of 3:1 between * lossy and lossless pools respectively. */ - lossy_epool_size = (shared_buffer_size / 4) * 3; lossless_ipool_size = shared_buffer_size / 4; + lossy_epool_size = shared_buffer_size - lossless_ipool_size; mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0, lossy_epool_size); @@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv, u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); - u32 new_headroom_size = 0; - u32 current_headroom_size; + u32 current_headroom_cells = 0; + u32 new_headroom_cells = 0; void *in; int err; int i; - current_headroom_size = port_buffer->headroom_size; - in = kzalloc(sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv, for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size); + u64 size = port_buffer->buffer[i].size; u64 xoff = port_buffer->buffer[i].xoff; u64 xon = port_buffer->buffer[i].xon; - new_headroom_size += size; do_div(size, port_buff_cell_sz); + new_headroom_cells += size; do_div(xoff, port_buff_cell_sz); do_div(xon, port_buff_cell_sz); MLX5_SET(bufferx_reg, buffer, size, size); @@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv, MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); } - new_headroom_size /= port_buff_cell_sz; - current_headroom_size /= port_buff_cell_sz; - err = port_update_shared_buffer(priv->mdev, current_headroom_size, - new_headroom_size); + err = port_update_shared_buffer(priv->mdev, current_headroom_cells, + new_headroom_cells); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c index a4263137fef5..01d522b02947 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c @@ -173,6 +173,8 @@ static 
void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs, memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions)); rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter); + rule_actions[0].counter.offset = + attr->counter->id - attr->counter->bulk->base_id; /* Modify header is special, it may require extra arguments outside the action itself. */ if (mh_action->mh_data) { rule_actions[1].modify_header.offset = mh_action->mh_data->offset; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 5fe016e477b3..d166c0d5189e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { + u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN; struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; u32 old_cable_len = priv->dcbx.cable_len; @@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, if (MLX5_BUFFER_SUPPORTED(mdev)) { pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en; - if (priv->dcbx.manual_buffer) + ret = mlx5_query_port_buffer_ownership(mdev, + &buffer_ownership); + if (ret) + netdev_err(dev, + "%s, Failed to get buffer ownership: %d\n", + __func__, ret); + + if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED) ret = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, &pfc_new, NULL, NULL); @@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev, if (!changed) return 0; - priv->dcbx.manual_buffer = true; err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL, buffer_size, prio2buffer); return err; @@ -1252,7 +1259,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); - priv->dcbx.manual_buffer = false; priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; mlx5e_ets_init(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index b7102e14d23d..c33accadae0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch * devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum, vport_num - 1, external); } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) { + u16 base_vport = mlx5_core_ec_vf_vport_base(dev); + memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum, - vport_num - 1, false); + vport_num - base_vport, false); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 91d863c8c152..8b4977650183 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -102,6 +102,8 @@ struct mlx5_esw_sched_node { u8 level; /* Valid only when this node represents a traffic class. */ u8 tc; + /* Valid only for a TC arbiter node or vport TC arbiter. 
*/ + u32 tc_bw[DEVLINK_RATE_TCS_MAX]; }; static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node) @@ -462,6 +464,7 @@ static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, struct netlink_ext_ack *extack) { + struct mlx5_esw_sched_node *parent = vport_node->parent; u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_core_dev *dev = vport_node->esw->dev; void *attr; @@ -477,7 +480,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); MLX5_SET(scheduling_context, sched_ctx, parent_element_id, - vport_node->parent->ix); + parent ? parent->ix : vport_node->esw->qos.root_tsar_ix); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); @@ -608,10 +611,7 @@ static void esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node, u32 *tc_bw) { - struct mlx5_esw_sched_node *vports_tc_node; - - list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry) - tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share; + memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw)); } static void @@ -628,6 +628,7 @@ esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node, u8 tc = vports_tc_node->tc; u32 bw_share; + tc_arbiter_node->tc_bw[tc] = tc_bw[tc]; bw_share = tc_bw[tc] * fw_max_bw_share; bw_share = esw_qos_calc_bw_share(bw_share, divider, fw_max_bw_share); @@ -786,48 +787,15 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta return err; } - if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) { - esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack); - } else { - /* The eswitch doesn't support scheduling nodes. - * Create a software-only node0 using the root TSAR to attach vport QoS to. - */ - if (!__esw_qos_alloc_node(esw, - esw->qos.root_tsar_ix, - SCHED_NODE_TYPE_VPORTS_TSAR, - NULL)) - esw->qos.node0 = ERR_PTR(-ENOMEM); - else - list_add_tail(&esw->qos.node0->entry, - &esw->qos.domain->nodes); - } - if (IS_ERR(esw->qos.node0)) { - err = PTR_ERR(esw->qos.node0); - esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err); - goto err_node0; - } refcount_set(&esw->qos.refcnt, 1); return 0; - -err_node0: - if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, - esw->qos.root_tsar_ix)) - esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n"); - - return err; } static void esw_qos_destroy(struct mlx5_eswitch *esw) { int err; - if (esw->qos.node0->ix != esw->qos.root_tsar_ix) - __esw_qos_destroy_node(esw->qos.node0, NULL); - else - __esw_qos_free_node(esw->qos.node0); - esw->qos.node0 = NULL; - err = mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, esw->qos.root_tsar_ix); @@ -990,13 +958,16 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - int err, new_level, max_level; + struct mlx5_esw_sched_node *parent = vport_node->parent; + int err; if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) { + int new_level, max_level; + /* Increase the parent's level by 2 to account for both the * TC arbiter and the vports TC scheduling element. */ - new_level = vport_node->parent->level + 2; + new_level = (parent ? 
parent->level : 2) + 2; max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev, log_esw_max_sched_depth); if (new_level > max_level) { @@ -1033,9 +1004,7 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type, err_sched_nodes: if (type == SCHED_NODE_TYPE_RATE_LIMITER) { esw_qos_node_destroy_sched_element(vport_node, NULL); - list_add_tail(&vport_node->entry, - &vport_node->parent->children); - vport_node->level = vport_node->parent->level + 1; + esw_qos_node_attach_to_parent(vport_node); } else { esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL); } @@ -1083,7 +1052,6 @@ err_out: static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - struct mlx5_esw_sched_node *parent = vport_node->parent; enum sched_node_type curr_type = vport_node->type; if (curr_type == SCHED_NODE_TYPE_VPORT) @@ -1092,8 +1060,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a esw_qos_vport_tc_disable(vport, extack); vport_node->bw_share = 0; + memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw)); list_del_init(&vport_node->entry); - esw_qos_normalize_min_rate(parent->esw, parent, extack); + esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack); trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport); } @@ -1103,25 +1072,23 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack) { + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; int err; esw_assert_qos_lock_held(vport->dev->priv.eswitch); - esw_qos_node_set_parent(vport->qos.sched_node, parent); - if (type == SCHED_NODE_TYPE_VPORT) { - err = esw_qos_vport_create_sched_element(vport->qos.sched_node, - extack); - } else { + esw_qos_node_set_parent(vport_node, parent); + if (type == SCHED_NODE_TYPE_VPORT) + err = esw_qos_vport_create_sched_element(vport_node, extack); + else err = esw_qos_vport_tc_enable(vport, type, extack); - } if (err) return err; - vport->qos.sched_node->type = type; - esw_qos_normalize_min_rate(parent->esw, parent, extack); - trace_mlx5_esw_vport_qos_create(vport->dev, vport, - vport->qos.sched_node->max_rate, - vport->qos.sched_node->bw_share); + vport_node->type = type; + esw_qos_normalize_min_rate(vport_node->esw, parent, extack); + trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate, + vport_node->bw_share); return 0; } @@ -1132,6 +1099,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; + struct mlx5_eswitch *parent_esw; int err; esw_assert_qos_lock_held(esw); @@ -1139,10 +1107,14 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t if (err) return err; - parent = parent ?: esw->qos.node0; - sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent); - if (!sched_node) + parent_esw = parent ? 
parent->esw : esw; + sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent); + if (!sched_node) { + esw_qos_put(esw); return -ENOMEM; + } + if (!parent) + list_add_tail(&sched_node->entry, &esw->qos.domain->nodes); sched_node->max_rate = max_rate; sched_node->min_rate = min_rate; @@ -1150,6 +1122,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t vport->qos.sched_node = sched_node; err = esw_qos_vport_enable(vport, type, parent, extack); if (err) { + __esw_qos_free_node(sched_node); esw_qos_put(esw); vport->qos.sched_node = NULL; } @@ -1157,6 +1130,19 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t return err; } +static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + + esw_assert_qos_lock_held(esw); + if (!vport->qos.sched_node) + return; + + esw_qos_vport_disable(vport, NULL); + mlx5_esw_qos_vport_qos_free(vport); + esw_qos_put(esw); +} + void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; @@ -1168,11 +1154,9 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) goto unlock; parent = vport->qos.sched_node->parent; - WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node"); + WARN(parent, "Disabling QoS on port before detaching it from node"); - esw_qos_vport_disable(vport, NULL); - mlx5_esw_qos_vport_qos_free(vport); - esw_qos_put(esw); + mlx5_esw_qos_vport_disable_locked(vport); unlock: esw_qos_unlock(esw); } @@ -1262,13 +1246,13 @@ static int esw_qos_vport_update(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack) { - struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent; - enum sched_node_type curr_type = vport->qos.sched_node->type; + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + struct mlx5_esw_sched_node *curr_parent = vport_node->parent; + enum sched_node_type curr_type = vport_node->type; u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0}; int err; esw_assert_qos_lock_held(vport->dev->priv.eswitch); - parent = parent ?: curr_parent; if (curr_type == type && curr_parent == parent) return 0; @@ -1276,10 +1260,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport, if (err) return err; - if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) { - esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node, - curr_tc_bw); - } + if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) + esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw); esw_qos_vport_disable(vport, extack); @@ -1290,8 +1272,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport, } if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) { - esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node, - curr_tc_bw, extack); + esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw, + extack); } return err; @@ -1306,16 +1288,16 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw esw_assert_qos_lock_held(esw); curr_parent = vport->qos.sched_node->parent; - parent = parent ?: esw->qos.node0; if (curr_parent == parent) return 0; /* Set vport QoS type based on parent node type if different from * default QoS; otherwise, use the vport's current QoS type. 
*/ - if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) + if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) type = SCHED_NODE_TYPE_RATE_LIMITER; - else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) + else if (curr_parent && + curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) type = SCHED_NODE_TYPE_VPORT; else type = vport->qos.sched_node->type; @@ -1654,9 +1636,10 @@ static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw, static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport, u32 *tc_bw) { - struct mlx5_eswitch *esw = vport->qos.sched_node ? - vport->qos.sched_node->parent->esw : - vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *node = vport->qos.sched_node; + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + + esw = (node && node->parent) ? node->parent->esw : esw; return esw_qos_validate_unsupported_tc_bw(esw, tc_bw); } @@ -1673,6 +1656,21 @@ static bool esw_qos_tc_bw_disabled(u32 *tc_bw) return true; } +static void esw_vport_qos_prune_empty(struct mlx5_vport *vport) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + if (!vport_node) + return; + + if (vport_node->parent || vport_node->max_rate || + vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw)) + return; + + mlx5_esw_qos_vport_disable_locked(vport); +} + int mlx5_esw_qos_init(struct mlx5_eswitch *esw) { if (esw->qos.domain) @@ -1706,6 +1704,10 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void esw_qos_lock(esw); err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack); + if (err) + goto out; + esw_vport_qos_prune_empty(vport); +out: esw_qos_unlock(esw); return err; } @@ -1727,6 +1729,10 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * esw_qos_lock(esw); err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack); + if (err) + goto out; + esw_vport_qos_prune_empty(vport); +out: esw_qos_unlock(esw); return err; } @@ -1763,7 +1769,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf, if (disable) { if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT, - NULL, extack); + vport_node->parent, extack); + esw_vport_qos_prune_empty(vport); goto unlock; } @@ -1775,7 +1782,7 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf, } else { err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_TC_ARBITER_TSAR, - NULL, extack); + vport_node->parent, extack); } if (!err) esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack); @@ -1924,14 +1931,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate, void *priv, void *parent_priv, struct netlink_ext_ack *extack) { - struct mlx5_esw_sched_node *node; + struct mlx5_esw_sched_node *node = parent ? 
parent_priv : NULL; struct mlx5_vport *vport = priv; + int err; - if (!parent) - return mlx5_esw_qos_vport_update_parent(vport, NULL, extack); + err = mlx5_esw_qos_vport_update_parent(vport, node, extack); + if (!err) { + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + + esw_qos_lock(esw); + esw_vport_qos_prune_empty(vport); + esw_qos_unlock(esw); + } - node = parent_priv; - return mlx5_esw_qos_vport_update_parent(vport, node, extack); + return err; } static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index b0b8ef3ec3c4..45506ad56847 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -373,11 +373,6 @@ struct mlx5_eswitch { refcount_t refcnt; u32 root_tsar_ix; struct mlx5_qos_domain *domain; - /* Contains all vports with QoS enabled but no explicit node. - * Cannot be NULL if QoS is enabled, but may be a fake node - * referencing the root TSAR if the esw doesn't support nodes. - */ - struct mlx5_esw_sched_node *node0; } qos; struct mlx5_esw_bridge_offloads *br_offloads; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b6d53db27cd5..9d3504f5abfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -367,6 +367,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); +int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev, + u8 *buffer_ownership); int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 549f1066d2a5..2d7adf7444ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state) return err; } +int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev, + u8 *buffer_ownership) +{ + u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {}; + int err; + + if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) { + *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN; + return 0; + } + + err = mlx5_query_pfcc_reg(mdev, out, sizeof(out)); + if (err) + return err; + + *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership); + + return 0; +} + int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio) { int sz = MLX5_ST_SZ_BYTES(qpdpm_reg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c index 92de4b761a83..adeccc588e5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c @@ -74,9 +74,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher, static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) { - bool move_error = false, poll_error = false, drain_error = false; struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; struct mlx5hws_matcher *matcher = 
bwc_matcher->matcher; + int drain_error = 0, move_error = 0, poll_error = 0; u16 bwc_queues = mlx5hws_bwc_queues(ctx); struct mlx5hws_rule_attr rule_attr; struct mlx5hws_bwc_rule *bwc_rule; @@ -84,6 +84,7 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) struct list_head *rules_list; u32 pending_rules; int i, ret = 0; + bool drain; mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); @@ -99,23 +100,37 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) ret = mlx5hws_matcher_resize_rule_move(matcher, bwc_rule->rule, &rule_attr); - if (unlikely(ret && !move_error)) { - mlx5hws_err(ctx, - "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n", - ret); - move_error = true; + if (unlikely(ret)) { + if (!move_error) { + mlx5hws_err(ctx, + "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n", + ret); + move_error = ret; + } + /* Rule wasn't queued, no need to poll */ + continue; } pending_rules++; + drain = pending_rules >= + hws_bwc_get_burst_th(ctx, rule_attr.queue_id); ret = mlx5hws_bwc_queue_poll(ctx, rule_attr.queue_id, &pending_rules, - false); - if (unlikely(ret && !poll_error)) { - mlx5hws_err(ctx, - "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n", - ret); - poll_error = true; + drain); + if (unlikely(ret)) { + if (ret == -ETIMEDOUT) { + mlx5hws_err(ctx, + "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n", + ret); + return ret; + } + if (!poll_error) { + mlx5hws_err(ctx, + "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n", + ret); + poll_error = ret; + } } } @@ -126,17 +141,30 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) rule_attr.queue_id, &pending_rules, true); - if (unlikely(ret && !drain_error)) { - mlx5hws_err(ctx, - "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n", - ret); - drain_error = true; + if (unlikely(ret)) { + if (ret == -ETIMEDOUT) { + mlx5hws_err(ctx, + "Moving bwc rule: timeout draining completions (%d), aborting rehash\n", + ret); + return ret; + } + if (!drain_error) { + mlx5hws_err(ctx, + "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n", + ret); + drain_error = ret; + } } } } - if (move_error || poll_error || drain_error) - ret = -EINVAL; + /* Return the first error that happened */ + if (unlikely(move_error)) + return move_error; + if (unlikely(poll_error)) + return poll_error; + if (unlikely(drain_error)) + return drain_error; return ret; } @@ -1035,6 +1063,21 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, return 0; /* rule inserted successfully */ } + /* Rule insertion could fail due to queue being full, timeout, or + * matcher in resize. In such cases, no point in trying to rehash. + */ + if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) { + mutex_unlock(queue_lock); + mlx5hws_err(ctx, + "BWC rule insertion failed - %s (%d)\n", + ret == -EBUSY ? "queue is full" : + ret == -ETIMEDOUT ? "timeout" : + ret == -EAGAIN ? "matcher in resize" : "N/A", + ret); + hws_bwc_rule_cnt_dec(bwc_rule); + return ret; + } + /* At this point the rule wasn't added. * It could be because there was collision, or some other problem. * Try rehash by size and insert rule again - last chance. 
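Aside: the bwc.c hunks above replace the boolean move/poll/drain flags with the first captured errno, so the rehash loop keeps moving the remaining rules after a non-fatal failure, aborts immediately on -ETIMEDOUT, and finally reports the root-cause error to the caller. A minimal stand-alone C sketch of that error-collection pattern follows; move_one() is a hypothetical stand-in for mlx5hws_matcher_resize_rule_move(), not part of the patch.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the real move operation; fails on two items so
 * the collection logic below has something to record. */
static int move_one(int i)
{
	if (i == 2)
		return -ENOMEM;		/* first (root-cause) failure */
	if (i == 4)
		return -EINVAL;		/* later failure; must not mask the first */
	return 0;
}

/* Record only the first errno, keep going on non-fatal errors, and abort at
 * once on a timeout - the same shape as hws_bwc_matcher_move_all_simple(). */
static int move_all(int n)
{
	int move_error = 0;
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = move_one(i);
		if (!ret)
			continue;
		if (ret == -ETIMEDOUT)
			return ret;	/* fatal: abort the whole rehash */
		if (!move_error)
			move_error = ret;	/* remember the first error */
		/* this item wasn't queued; continue with the rest */
	}

	return move_error;	/* 0 on full success, else the first errno */
}

int main(void)
{
	printf("move_all() -> %d\n", move_all(6));	/* -ENOMEM, i.e. -12 */
	return 0;
}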
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c index ca7501c57468..14e79579c719 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c @@ -1328,11 +1328,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) { struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; struct mlx5hws_matcher *matcher = bwc_matcher->matcher; - bool move_error = false, poll_error = false; u16 bwc_queues = mlx5hws_bwc_queues(ctx); struct mlx5hws_bwc_rule *tmp_bwc_rule; struct mlx5hws_rule_attr rule_attr; struct mlx5hws_table *isolated_tbl; + int move_error = 0, poll_error = 0; struct mlx5hws_rule *tmp_rule; struct list_head *rules_list; u32 expected_completions = 1; @@ -1391,11 +1391,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) ret = mlx5hws_matcher_resize_rule_move(matcher, tmp_rule, &rule_attr); - if (unlikely(ret && !move_error)) { - mlx5hws_err(ctx, - "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n", - ret); - move_error = true; + if (unlikely(ret)) { + if (!move_error) { + mlx5hws_err(ctx, + "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n", + ret); + move_error = ret; + } + /* Rule wasn't queued, no need to poll */ + continue; } expected_completions = 1; @@ -1403,11 +1407,19 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) rule_attr.queue_id, &expected_completions, true); - if (unlikely(ret && !poll_error)) { - mlx5hws_err(ctx, - "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n", - ret); - poll_error = true; + if (unlikely(ret)) { + if (ret == -ETIMEDOUT) { + mlx5hws_err(ctx, + "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n", + ret); + return ret; + } + if (!poll_error) { + mlx5hws_err(ctx, + "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n", + ret); + poll_error = ret; + } } /* Done moving the rule to the new matcher, @@ -1422,8 +1434,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) } } - if (move_error || poll_error) - ret = -EINVAL; + /* Return the first error that happened */ + if (unlikely(move_error)) + return move_error; + if (unlikely(poll_error)) + return poll_error; return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c index 9c83753e4592..0bdcab2e5cf3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c @@ -55,6 +55,7 @@ int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev, MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type); + MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid); ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context); MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h index fa6bff210266..122ccc671628 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h @@ -36,6 
+36,7 @@ struct mlx5hws_cmd_set_fte_attr { struct mlx5hws_cmd_ft_create_attr { u8 type; u8 level; + u16 uid; bool rtc_valid; bool decap_en; bool reformat_en; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c index 57592b92e24b..131e74b2b774 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c @@ -267,6 +267,7 @@ static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns, tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB; tbl_attr.level = ft_attr->level; + tbl_attr.uid = ft_attr->uid; tbl = mlx5hws_table_create(ctx, &tbl_attr); if (!tbl) { mlx5_core_err(ns->dev, "Failed creating hws flow_table\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c index f3ea09caba2b..32f87fdf3213 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c @@ -85,6 +85,7 @@ static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher) ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, + 0, &matcher->end_ft_id); if (ret) { mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n"); @@ -112,7 +113,9 @@ static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher) if (mlx5hws_matcher_is_isolated(matcher)) ret = hws_matcher_create_end_ft_isolated(matcher); else - ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, + tbl, + 0, &matcher->end_ft_id); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h index 59c14745ed0c..2498ceff2060 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -75,6 +75,7 @@ struct mlx5hws_context_attr { struct mlx5hws_table_attr { enum mlx5hws_table_type type; u32 level; + u16 uid; }; enum mlx5hws_matcher_flow_src { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c index c4b22be19a9b..b0595c9b09e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c @@ -964,7 +964,6 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev, return -ENOMEM; MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index); - MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries); MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries)); err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c index 568f691733f3..6113383ae47b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c @@ -9,6 +9,7 @@ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl) } static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl, + u16 uid, struct mlx5hws_cmd_ft_create_attr *ft_attr) { ft_attr->type = tbl->fw_ft_type; @@ -16,7 +17,9 @@ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl, ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1; else 
ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1; + ft_attr->rtc_valid = true; + ft_attr->uid = uid; } static void hws_table_set_cap_attr(struct mlx5hws_table *tbl, @@ -119,12 +122,12 @@ static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, struct mlx5hws_table *tbl, - u32 *ft_id) + u16 uid, u32 *ft_id) { struct mlx5hws_cmd_ft_create_attr ft_attr = {0}; int ret; - hws_table_init_next_ft_attr(tbl, &ft_attr); + hws_table_init_next_ft_attr(tbl, uid, &ft_attr); hws_table_set_cap_attr(tbl, &ft_attr); ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id); @@ -189,7 +192,10 @@ static int hws_table_init(struct mlx5hws_table *tbl) } mutex_lock(&ctx->ctrl_lock); - ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id); + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, + tbl, + tbl->uid, + &tbl->ft_id); if (ret) { mlx5hws_err(tbl->ctx, "Failed to create flow table object\n"); mutex_unlock(&ctx->ctrl_lock); @@ -239,6 +245,7 @@ struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx, tbl->ctx = ctx; tbl->type = attr->type; tbl->level = attr->level; + tbl->uid = attr->uid; ret = hws_table_init(tbl); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h index 0400cce0c317..1246f9bd8422 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h @@ -18,6 +18,7 @@ struct mlx5hws_table { enum mlx5hws_table_type type; u32 fw_ft_type; u32 level; + u16 uid; struct list_head matchers_list; struct list_head tbl_list_node; struct mlx5hws_default_miss default_miss; @@ -47,7 +48,7 @@ u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type, int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev, struct mlx5hws_table *tbl, - u32 *ft_id); + u16 uid, u32 *ft_id); void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl, u32 ft_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 618957d65663..9a2d64a0a858 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2375,6 +2375,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { ROUTER_EXP, false), MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD, ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD, + ROUTER_EXP, false), /* Multicast Router Traps */ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 80ee5c4825dc..9962dc157901 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -94,6 +94,7 @@ enum { MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A, MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B, MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D, MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178, MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179, MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B, diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c index dd436bdff0f8..84c41f193561 100644 --- 
a/drivers/net/ethernet/microchip/lan865x/lan865x.c +++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c @@ -32,6 +32,10 @@ /* MAC Specific Addr 1 Top Reg */ #define LAN865X_REG_MAC_H_SADDR1 0x00010023 +/* MAC TSU Timer Increment Register */ +#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077 +#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028 + struct lan865x_priv { struct work_struct multicast_work; struct net_device *netdev; @@ -311,6 +315,8 @@ static int lan865x_net_open(struct net_device *netdev) phy_start(netdev->phydev); + netif_start_queue(netdev); + return 0; } @@ -344,6 +350,21 @@ static int lan865x_probe(struct spi_device *spi) goto free_netdev; } + /* LAN865x Rev.B0/B1 configuration parameters from AN1760 + * As per the Configuration Application Note AN1760, Revision F + * (DS60001760G - June 2024), available at + * https://www.microchip.com/en-us/application-notes/an1760, + * configure the MAC to set time stamping at the end of the Start of + * Frame Delimiter (SFD) and set the Timer Increment reg to 40 ns to + * be used as a 25 MHz internal clock. + */ + ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR, + MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS); + if (ret) { + dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n", + ret); + goto oa_tc6_exit; + } + /* As per the point s3 in the below errata, SPI receive Ethernet frame * transfer may halt when starting the next frame in the same data block * (chunk) as the end of a previous frame. The RFA field should be diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index 20decdeb9fdb..b9209eb6ea73 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -241,7 +241,7 @@ union rtase_rx_desc { #define RTASE_RX_RES BIT(20) #define RTASE_RX_RUNT BIT(19) #define RTASE_RX_RWT BIT(18) -#define RTASE_RX_CRC BIT(16) +#define RTASE_RX_CRC BIT(17) #define RTASE_RX_V6F BIT(31) #define RTASE_RX_V4F BIT(30) #define RTASE_RX_UDPT BIT(29) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 09ae16e026eb..6c363f9b0ce2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -330,15 +330,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); - ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks); + ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks); if (ret < 0) - return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n"); + return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n"); plat_dat->num_clks = ret; - ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks); - if (ret) - return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n"); plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat, data->stmmac_clk_name); @@ -346,7 +342,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) ret = data->probe(pdev, plat_dat, &stmmac_res); if (ret < 0) { dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n"); - clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); return ret; } @@ -370,15 +365,11 @@ remove: static void dwc_eth_dwmac_remove(struct platform_device *pdev) { const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev); - struct plat_stmmacenet_data *plat_dat = 
dev_get_platdata(&pdev->dev); stmmac_dvr_remove(&pdev->dev); if (data->remove) data->remove(pdev); - - if (plat_dat) - clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); } static const struct of_device_id dwc_eth_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 79b92130a03f..f6687c2f30f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1765,11 +1765,15 @@ err_gmac_powerdown: static void rk_gmac_remove(struct platform_device *pdev) { - struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev); + struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev)); + struct rk_priv_data *bsp_priv = priv->plat->bsp_priv; stmmac_dvr_remove(&pdev->dev); rk_gmac_powerdown(bsp_priv); + + if (priv->plat->phy_node && bsp_priv->integrated_phy) + clk_put(bsp_priv->clk_phy); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c index c72ee759aae5..6c6c49e4b66f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c @@ -152,7 +152,7 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat) { struct thead_dwmac *dwmac = plat->bsp_priv; - u32 reg; + u32 reg, div; switch (plat->mac_interface) { case PHY_INTERFACE_MODE_MII: @@ -164,6 +164,13 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat) case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: /* use pll */ + div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000); + reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) | + FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div); + + writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV); + writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV); + writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL); reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN | GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN; @@ -211,6 +218,7 @@ static int thead_dwmac_probe(struct platform_device *pdev) struct stmmac_resources stmmac_res; struct plat_stmmacenet_data *plat; struct thead_dwmac *dwmac; + struct clk *apb_clk; void __iomem *apb; int ret; @@ -224,6 +232,19 @@ static int thead_dwmac_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(plat), "dt configuration failed\n"); + /* + * The APB clock is essential for accessing glue registers. However, + * old devicetrees don't describe it correctly. We continue to probe + * and emit a warning if it isn't present. 
+ */ + apb_clk = devm_clk_get_enabled(&pdev->dev, "apb"); + if (PTR_ERR(apb_clk) == -ENOENT) + dev_warn(&pdev->dev, + "cannot get apb clock, link may break after speed changes\n"); + else if (IS_ERR(apb_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk), + "failed to get apb clock\n"); + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) return -ENOMEM; diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 50bfbc2779e4..d8c9fe1d98c4 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -621,7 +621,8 @@ exit: static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) { - u32 val, cap, ret = 0; + u32 val, cap; + int ret = 0; mutex_lock(&iep->ptp_clk_mutex); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 2b973d6e2341..dadce6009791 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -50,6 +50,8 @@ /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) +static void emac_adjust_link(struct net_device *ndev); + static int emac_get_tx_ts(struct prueth_emac *emac, struct emac_tx_ts_response *rsp) { @@ -201,6 +203,44 @@ static void prueth_emac_stop(struct prueth *prueth) } } +static void icssg_enable_fw_offload(struct prueth *prueth) +{ + struct prueth_emac *emac; + int mac; + + for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { + emac = prueth->emac[mac]; + if (prueth->is_hsr_offload_mode) { + if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); + else + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); + } + + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) { + if (netif_running(emac->ndev)) { + icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | + ICSSG_FDB_ENTRY_BLOCK, + true); + icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, + BIT(emac->port_id) | DEFAULT_PORT_MASK, + BIT(emac->port_id) | DEFAULT_UNTAG_MASK, + true); + if (prueth->is_hsr_offload_mode) + icssg_vtbl_modify(emac, DEFAULT_VID, + DEFAULT_PORT_MASK, + DEFAULT_UNTAG_MASK, true); + icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); + if (prueth->is_switch_mode) + icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); + } + } + } +} + static int prueth_emac_common_start(struct prueth *prueth) { struct prueth_emac *emac; @@ -229,6 +269,10 @@ static int prueth_emac_common_start(struct prueth *prueth) ret = icssg_config(prueth, emac, slice); if (ret) goto disable_class; + + mutex_lock(&emac->ndev->phydev->lock); + emac_adjust_link(emac->ndev); + mutex_unlock(&emac->ndev->phydev->lock); } ret = prueth_emac_start(prueth); @@ -747,6 +791,7 @@ static int emac_ndo_open(struct net_device *ndev) ret = prueth_emac_common_start(prueth); if (ret) goto free_rx_irq; + icssg_enable_fw_offload(prueth); } flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET; @@ -1354,8 +1399,7 @@ static int prueth_emac_restart(struct prueth *prueth) static void icssg_change_mode(struct prueth *prueth) { - struct prueth_emac *emac; - int mac, ret; + int ret; ret = prueth_emac_restart(prueth); if (ret) { @@ -1363,35 +1407,7 @@ static void icssg_change_mode(struct prueth *prueth) return; } - for (mac = PRUETH_MAC0; mac < 
PRUETH_NUM_MACS; mac++) { - emac = prueth->emac[mac]; - if (prueth->is_hsr_offload_mode) { - if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) - icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); - else - icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); - } - - if (netif_running(emac->ndev)) { - icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, - ICSSG_FDB_ENTRY_P0_MEMBERSHIP | - ICSSG_FDB_ENTRY_P1_MEMBERSHIP | - ICSSG_FDB_ENTRY_P2_MEMBERSHIP | - ICSSG_FDB_ENTRY_BLOCK, - true); - icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, - BIT(emac->port_id) | DEFAULT_PORT_MASK, - BIT(emac->port_id) | DEFAULT_UNTAG_MASK, - true); - if (prueth->is_hsr_offload_mode) - icssg_vtbl_modify(emac, DEFAULT_VID, - DEFAULT_PORT_MASK, - DEFAULT_UNTAG_MASK, true); - icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); - if (prueth->is_switch_mode) - icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); - } - } + icssg_enable_fw_offload(prueth); } static int prueth_netdevice_port_link(struct net_device *ndev, diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c index 5d48df7a849f..3023ea2732ef 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c @@ -192,7 +192,7 @@ void wx_setup_vfmrqc_vf(struct wx *wx) u8 i, j; /* Fill out hash function seeds */ - netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key)); + netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE); for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++) wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 6011d7eae0c7..0d8a05fe541a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1160,6 +1160,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result) struct axienet_local *lp = data; struct sk_buff *skb; u32 *app_metadata; + int i; skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++); skb = skbuf_dma->skb; @@ -1178,7 +1179,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result) u64_stats_add(&lp->rx_packets, 1); u64_stats_add(&lp->rx_bytes, rx_len); u64_stats_update_end(&lp->rx_stat_sync); - axienet_rx_submit_desc(lp->ndev); + + for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail, + RX_BUF_NUM_DEFAULT); i++) + axienet_rx_submit_desc(lp->ndev); dma_async_issue_pending(lp->rx_chan); } @@ -1457,7 +1461,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev) if (!skbuf_dma) return; - lp->rx_ring_head++; skb = netdev_alloc_skb(ndev, lp->max_frm_size); if (!skb) return; @@ -1482,6 +1485,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev) skbuf_dma->desc = dma_rx_desc; dma_rx_desc->callback_param = lp; dma_rx_desc->callback_result = axienet_dma_rx_cb; + lp->rx_ring_head++; dmaengine_submit(dma_rx_desc); return; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 0e0fe32d2da4..045c5177262e 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -138,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) static inline int dev_is_ethdev(struct net_device *dev) { - return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); + return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev); } /* 
------------------------------------------------------------------------ */ diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index cb6f5482d203..7397c693f984 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -1061,6 +1061,7 @@ struct net_device_context { struct net_device __rcu *vf_netdev; struct netvsc_vf_pcpu_stats __percpu *vf_stats; struct delayed_work vf_takeover; + struct delayed_work vfns_work; /* 1: allocated, serial number is valid. 0: not allocated */ u32 vf_alloc; @@ -1075,6 +1076,8 @@ struct net_device_context { struct netvsc_device_info *saved_netvsc_dev_info; }; +void netvsc_vfns_work(struct work_struct *w); + /* Azure hosts don't support non-TCP port numbers in hashing for fragmented * packets. We can use ethtool to change UDP hash level when necessary. */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f44753756358..39c892e46cb0 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2522,6 +2522,7 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work); net_device_ctx->vf_stats = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); @@ -2666,6 +2667,8 @@ static void netvsc_remove(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork); rtnl_lock(); + cancel_delayed_work_sync(&ndev_ctx->vfns_work); + nvdev = rtnl_dereference(ndev_ctx->nvdev); if (nvdev) { cancel_work_sync(&nvdev->subchan_work); @@ -2707,6 +2710,7 @@ static int netvsc_suspend(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork); rtnl_lock(); + cancel_delayed_work_sync(&ndev_ctx->vfns_work); nvdev = rtnl_dereference(ndev_ctx->nvdev); if (nvdev == NULL) { @@ -2800,6 +2804,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev) } } +void netvsc_vfns_work(struct work_struct *w) +{ + struct net_device_context *ndev_ctx = + container_of(w, struct net_device_context, vfns_work.work); + struct net_device *ndev; + + if (!rtnl_trylock()) { + schedule_delayed_work(&ndev_ctx->vfns_work, 1); + return; + } + + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (!ndev) + goto out; + + netvsc_event_set_vf_ns(ndev); + +out: + rtnl_unlock(); +} + /* * On Hyper-V, every VF interface is matched with a corresponding * synthetic interface. 
The synthetic interface is presented first @@ -2810,10 +2835,12 @@ static int netvsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + struct net_device_context *ndev_ctx; int ret = 0; if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { - netvsc_event_set_vf_ns(event_dev); + ndev_ctx = netdev_priv(event_dev); + schedule_delayed_work(&ndev_ctx->vfns_work, 0); return NOTIFY_DONE; } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 39fe28af48b9..0178219f0db5 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -710,9 +710,13 @@ static struct nsim_rq *nsim_queue_alloc(void) static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq) { hrtimer_cancel(&rq->napi_timer); - local_bh_disable(); - dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); - local_bh_enable(); + + if (rq->skb_queue.qlen) { + local_bh_disable(); + dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); + local_bh_enable(); + } + skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE); kfree(rq); } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index fda2e27c1810..cad6ed3aa10b 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -91,6 +91,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev) if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) return -EINVAL; + gpiod_put(mdiodev->reset_gpio); reset_control_put(mdiodev->reset_ctrl); mdiodev->bus->mdio_map[mdiodev->addr] = NULL; diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c index 48dc4bf85125..f43973e73ea3 100644 --- a/drivers/net/phy/mdio_bus_provider.c +++ b/drivers/net/phy/mdio_bus_provider.c @@ -443,9 +443,6 @@ void mdiobus_unregister(struct mii_bus *bus) if (!mdiodev) continue; - if (mdiodev->reset_gpio) - gpiod_put(mdiodev->reset_gpio); - mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index 6a3d8a754eb8..58c6d47fbe04 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -362,6 +362,13 @@ struct vsc85xx_hw_stat { u16 mask; }; +struct vsc8531_skb_cb { + u32 ns; +}; + +#define VSC8531_SKB_CB(skb) \ + ((struct vsc8531_skb_cb *)((skb)->cb)) + struct vsc8531_private { int rate_magic; u16 supp_led_modes; @@ -410,6 +417,11 @@ struct vsc8531_private { */ struct mutex ts_lock; struct mutex phc_lock; + + /* list of skbs that were received and need timestamp information but + * have not received it yet + */ + struct sk_buff_head rx_skbs_list; }; /* Shared structure between the PHYs of the same package. 
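The mscc.h hunk above adds rx_skbs_list so the receive path can park skbs that still await timestamp data, leaving the actual timestamping to the PTP aux worker introduced further down in mscc_ptp.c. That worker uses the classic splice-under-lock shape: move the whole shared list to a private head inside one short critical section, then do the slow per-entry work unlocked. A hedged userspace sketch of the same pattern follows, with pthreads standing in for the kernel's skb queue lock and invented item names.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	unsigned long ns;	/* stand-in for VSC8531_SKB_CB(skb)->ns */
	struct item *next;
};

static struct item *pending;	/* shared list head, guarded by the lock */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Receive path: park one entry on the shared list.
 * (Prepending gives LIFO order for brevity; the driver keeps FIFO
 * order with skb_queue_tail().)
 */
static void enqueue(unsigned long ns)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return;
	it->ns = ns;
	pthread_mutex_lock(&pending_lock);
	it->next = pending;
	pending = it;
	pthread_mutex_unlock(&pending_lock);
}

/* Worker: splice everything out in one short critical section. */
static void do_aux_work(void)
{
	struct item *received, *next;

	pthread_mutex_lock(&pending_lock);
	received = pending;	/* take the whole list ... */
	pending = NULL;		/* ... leaving the shared head empty */
	pthread_mutex_unlock(&pending_lock);

	/* The slow part (gettime + netif_rx in the driver) runs unlocked. */
	for (; received; received = next) {
		next = received->next;
		printf("timestamping entry, ns=%lu\n", received->ns);
		free(received);
	}
}

int main(void)
{
	enqueue(100);
	enqueue(200);
	do_aux_work();
	return 0;
}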
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 7ed6522fb0ef..f1c9ce351ab4 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2335,6 +2335,13 @@ static int vsc85xx_probe(struct phy_device *phydev) return vsc85xx_dt_led_modes_get(phydev, default_mode); } +static void vsc85xx_remove(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + + skb_queue_purge(&priv->rx_skbs_list); +} + /* Microsemi VSC85xx PHYs */ static struct phy_driver vsc85xx_driver[] = { { @@ -2589,6 +2596,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8574_probe, .set_wol = &vsc85xx_wol_set, .get_wol = &vsc85xx_wol_get, @@ -2614,6 +2622,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8574_probe, .set_wol = &vsc85xx_wol_set, .get_wol = &vsc85xx_wol_get, @@ -2639,6 +2648,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8584_probe, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, @@ -2662,6 +2672,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8584_probe, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, @@ -2685,6 +2696,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, + .remove = &vsc85xx_remove, .probe = &vsc8584_probe, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 275706de5847..de6c7312e8f2 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1194,9 +1194,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, { struct vsc8531_private *vsc8531 = container_of(mii_ts, struct vsc8531_private, mii_ts); - struct skb_shared_hwtstamps *shhwtstamps = NULL; struct vsc85xx_ptphdr *ptphdr; - struct timespec64 ts; unsigned long ns; if (!vsc8531->ptp->configured) @@ -1206,27 +1204,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, type == PTP_CLASS_NONE) return false; - vsc85xx_gettime(&vsc8531->ptp->caps, &ts); - ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter); if (!ptphdr) return false; - shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - ns = ntohl(ptphdr->rsrvd2); - /* nsec is in reserved field */ - if (ts.tv_nsec < ns) - ts.tv_sec--; + VSC8531_SKB_CB(skb)->ns = ns; + skb_queue_tail(&vsc8531->rx_skbs_list, skb); - shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns); - netif_rx(skb); + ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0); return true; } +static long vsc85xx_do_aux_work(struct ptp_clock_info *info) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct skb_shared_hwtstamps *shhwtstamps = NULL; + struct phy_device *phydev = ptp->phydev; + struct vsc8531_private *priv = phydev->priv; + struct sk_buff_head received; + struct sk_buff *rx_skb; + struct timespec64 ts; + unsigned long flags; + + 
__skb_queue_head_init(&received); + spin_lock_irqsave(&priv->rx_skbs_list.lock, flags); + skb_queue_splice_tail_init(&priv->rx_skbs_list, &received); + spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags); + + vsc85xx_gettime(info, &ts); + while ((rx_skb = __skb_dequeue(&received)) != NULL) { + shhwtstamps = skb_hwtstamps(rx_skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + + if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns) + ts.tv_sec--; + + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, + VSC8531_SKB_CB(rx_skb)->ns); + netif_rx(rx_skb); + } + + return -1; +} + static const struct ptp_clock_info vsc85xx_clk_caps = { .owner = THIS_MODULE, .name = "VSC85xx timer", @@ -1240,6 +1263,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = { .adjfine = &vsc85xx_adjfine, .gettime64 = &vsc85xx_gettime, .settime64 = &vsc85xx_settime, + .do_aux_work = &vsc85xx_do_aux_work, }; static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev) @@ -1567,6 +1591,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev) mutex_init(&vsc8531->phc_lock); mutex_init(&vsc8531->ts_lock); + skb_queue_head_init(&vsc8531->rx_skbs_list); /* Retrieve the shared load/save GPIO. Request it as non exclusive as * the same GPIO can be requested by all the PHYs of the same package. diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 4c6d905f0a9f..87adb6508017 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -1965,24 +1965,27 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev) return macsec_ability; } +static bool tja11xx_phy_id_compare(struct phy_device *phydev, + const struct phy_driver *phydrv) +{ + u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] : + phydev->phy_id; + + return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask); +} + static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev, const struct phy_driver *phydrv) { - if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, - phydrv->phy_id_mask)) - return 0; - - return !nxp_c45_macsec_ability(phydev); + return tja11xx_phy_id_compare(phydev, phydrv) && + !nxp_c45_macsec_ability(phydev); } static int tja11xx_macsec_match_phy_device(struct phy_device *phydev, const struct phy_driver *phydrv) { - if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, - phydrv->phy_id_mask)) - return 0; - - return nxp_c45_macsec_ability(phydev); + return tja11xx_phy_id_compare(phydev, phydrv) && + nxp_c45_macsec_ability(phydev); } static const struct nxp_c45_regmap tja1120_regmap = { diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 8c98cbd4b06d..824c8dc4120b 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -33,6 +33,7 @@ #include <linux/ppp_channel.h> #include <linux/ppp-comp.h> #include <linux/skbuff.h> +#include <linux/rculist.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/ip.h> @@ -1598,11 +1599,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx, if (ppp->flags & SC_MULTILINK) return -EOPNOTSUPP; - if (list_empty(&ppp->channels)) + pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist); + if (!pch) + return -ENODEV; + + chan = READ_ONCE(pch->chan); + if (!chan) return -ENODEV; - pch = list_first_entry(&ppp->channels, struct channel, clist); - chan = pch->chan; if (!chan->ops->fill_forward_path) return -EOPNOTSUPP; @@ -2994,7 +2998,7 @@ ppp_unregister_channel(struct ppp_channel *chan) */ 
down_write(&pch->chan_sem); spin_lock_bh(&pch->downl); - pch->chan = NULL; + WRITE_ONCE(pch->chan, NULL); spin_unlock_bh(&pch->downl); up_write(&pch->chan_sem); ppp_disconnect_channel(pch); @@ -3515,7 +3519,7 @@ ppp_connect_channel(struct channel *pch, int unit) hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ if (hdrlen > ppp->dev->hard_header_len) ppp->dev->hard_header_len = hdrlen; - list_add_tail(&pch->clist, &ppp->channels); + list_add_tail_rcu(&pch->clist, &ppp->channels); ++ppp->n_channels; pch->ppp = ppp; refcount_inc(&ppp->file.refcnt); @@ -3545,10 +3549,11 @@ ppp_disconnect_channel(struct channel *pch) if (ppp) { /* remove it from the ppp unit's list */ ppp_lock(ppp); - list_del(&pch->clist); + list_del_rcu(&pch->clist); if (--ppp->n_channels == 0) wake_up_interruptible(&ppp->file.rwait); ppp_unlock(ppp); + synchronize_net(); if (refcount_dec_and_test(&ppp->file.refcnt)) ppp_destroy_interface(ppp); err = 0; diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c index 399ce9febda4..f4e91ba64a66 100644 --- a/drivers/net/pse-pd/pd692x0.c +++ b/drivers/net/pse-pd/pd692x0.c @@ -1041,6 +1041,10 @@ pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers) int pw_budget; pw_budget = regulator_get_unclaimed_power_budget(supply); + if (!pw_budget) + /* Do nothing if no power budget */ + continue; + /* Max power budget per manager */ if (pw_budget > 6000000) pw_budget = 6000000; @@ -1162,12 +1166,44 @@ pd692x0_write_ports_matrix(struct pd692x0_priv *priv, return 0; } +static void pd692x0_of_put_managers(struct pd692x0_priv *priv, + struct pd692x0_manager *manager, + int nmanagers) +{ + int i, j; + + for (i = 0; i < nmanagers; i++) { + for (j = 0; j < manager[i].nports; j++) + of_node_put(manager[i].port_node[j]); + of_node_put(manager[i].node); + } +} + +static void pd692x0_managers_free_pw_budget(struct pd692x0_priv *priv) +{ + int i; + + for (i = 0; i < PD692X0_MAX_MANAGERS; i++) { + struct regulator *supply; + + if (!priv->manager_reg[i] || !priv->manager_pw_budget[i]) + continue; + + supply = priv->manager_reg[i]->supply; + if (!supply) + continue; + + regulator_free_power_budget(supply, + priv->manager_pw_budget[i]); + } +} + static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) { struct pd692x0_manager *manager __free(kfree) = NULL; struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]; - int ret, i, j, nmanagers; + int ret, nmanagers; /* Should we flash the port matrix */ if (priv->fw_state != PD692X0_FW_OK && @@ -1185,31 +1221,27 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) nmanagers = ret; ret = pd692x0_register_managers_regulator(priv, manager, nmanagers); if (ret) - goto out; + goto err_of_managers; ret = pd692x0_configure_managers(priv, nmanagers); if (ret) - goto out; + goto err_of_managers; ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix); if (ret) - goto out; + goto err_managers_req_pw; ret = pd692x0_write_ports_matrix(priv, port_matrix); if (ret) - goto out; - -out: - for (i = 0; i < nmanagers; i++) { - struct regulator *supply = priv->manager_reg[i]->supply; + goto err_managers_req_pw; - regulator_free_power_budget(supply, - priv->manager_pw_budget[i]); + pd692x0_of_put_managers(priv, manager, nmanagers); + return 0; - for (j = 0; j < manager[i].nports; j++) - of_node_put(manager[i].port_node[j]); - of_node_put(manager[i].node); - } +err_managers_req_pw: + pd692x0_managers_free_pw_budget(priv); +err_of_managers: + 
pd692x0_of_put_managers(priv, manager, nmanagers); return ret; } @@ -1748,6 +1780,7 @@ static void pd692x0_i2c_remove(struct i2c_client *client) { struct pd692x0_priv *priv = i2c_get_clientdata(client); + pd692x0_managers_free_pw_budget(priv); firmware_upload_unregister(priv->fwl); } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 9b0318fb50b5..792ddda1ad49 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev) priv->mdio->read = &asix_mdio_bus_read; priv->mdio->write = &asix_mdio_bus_write; priv->mdio->name = "Asix MDIO Bus"; + priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR)); /* mii bus name is usb-<usb bus number>-<usb device number> */ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index ea0e5e276cd6..5d123df0a866 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -2087,6 +2087,13 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long)&wwan_info, }, + /* Intel modem (label from OEM reads Fibocom L850-GL) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, + }, + /* DisplayLink docking stations */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f5647ee0adde..e56901bb6ebc 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1361,6 +1361,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */ diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 995a7207bdf8..f357a7ac70ac 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -81,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) static __inline__ int dev_is_ethdev(struct net_device *dev) { - return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); + return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev); } /* ------------------------------------------------------------------------ */ diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index f121836c3cf4..937ea6ae1ac4 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -400,7 +400,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) & XILINX_PCIE_RPIFR2_MSG_DATA; - domain = pcie->msi_domain->parent; + domain = pcie->msi_domain; } else { val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> XILINX_PCIE_RPIFR1_INTR_SHIFT; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index b679c7f28f51..1bd5bf4a6097 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -306,9 +306,6 @@ static bool 
vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain, struct irq_domain *real_parent, struct msi_domain_info *info) { - if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX)) - return false; - if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) return false; diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c index 54986a752f7d..a94009203e01 100644 --- a/drivers/platform/x86/amd/hsmp/acpi.c +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -504,7 +504,7 @@ static int init_acpi(struct device *dev) dev_set_drvdata(dev, &hsmp_pdev->sock[sock_ind]); - return ret; + return 0; } static const struct bin_attribute hsmp_metric_tbl_attr = { diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c index 885e2f8136fd..19f82c1d3090 100644 --- a/drivers/platform/x86/amd/hsmp/hsmp.c +++ b/drivers/platform/x86/amd/hsmp/hsmp.c @@ -356,6 +356,11 @@ ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size) if (!sock || !buf) return -EINVAL; + if (!sock->metric_tbl_addr) { + dev_err(sock->dev, "Metrics table address not available\n"); + return -ENOMEM; + } + /* Do not support lseek(), also don't allow more than the size of metric table */ if (size != sizeof(struct hsmp_metric_table)) { dev_err(sock->dev, "Wrong buffer size\n"); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index ded4c84f5ed1..7ffc659b2794 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -28,10 +28,15 @@ static struct quirk_entry quirk_spurious_8042 = { .spurious_8042 = true, }; +static struct quirk_entry quirk_s2idle_spurious_8042 = { + .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH, + .spurious_8042 = true, +}; + static const struct dmi_system_id fwbug_list[] = { { .ident = "L14 Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20X5"), @@ -39,7 +44,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14s Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20XF"), @@ -47,7 +52,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "X13 Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20XH"), @@ -55,7 +60,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14 Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20XK"), @@ -63,7 +68,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14 Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UD"), @@ -71,7 +76,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14 Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UE"), @@ -79,7 +84,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14s Gen1 AMD", - .driver_data = 
&quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UH"), @@ -87,7 +92,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "T14s Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"), @@ -95,7 +100,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "P14s Gen1 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"), @@ -103,7 +108,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "P14s Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21A0"), @@ -111,7 +116,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "P14s Gen2 AMD", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), @@ -152,7 +157,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad 1 14AMN7", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82VF"), @@ -160,7 +165,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad 1 15AMN7", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82VG"), @@ -168,7 +173,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad 1 15AMN7", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82X5"), @@ -176,7 +181,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad Slim 3 14AMN8", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82XN"), @@ -184,7 +189,7 @@ static const struct dmi_system_id fwbug_list[] = { }, { .ident = "IdeaPad Slim 3 15AMN8", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), @@ -193,7 +198,7 @@ static const struct dmi_system_id fwbug_list[] = { /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ { .ident = "Lenovo Yoga 6 13ALC6", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "82ND"), @@ -202,7 +207,7 @@ static const struct dmi_system_id fwbug_list[] = { /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ { .ident = "HP Laptop 15s-eq2xxx", - .driver_data = &quirk_s2idle_bug, + .driver_data = &quirk_s2idle_spurious_8042, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"), @@ -285,6 +290,16 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev) { const struct dmi_system_id *dmi_id; + /* + * IRQ1 may cause an interrupt during resume even without a keyboard + * press. 
+ * + * Affects Renoir, Cezanne and Barcelo SoCs + * + * A solution is available in PMFW 64.66.0, but it must be activated by + * SBIOS. If SBIOS is known to have the fix, a quirk can be added for + * a given system to avoid the workaround. + */ if (dev->cpu_id == AMD_CPU_ID_CZN) dev->disable_8042_wakeup = true; @@ -295,6 +310,5 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev) if (dev->quirks->s2idle_bug_mmio) pr_info("Using s2idle quirk to avoid %s platform firmware bug\n", dmi_id->ident); - if (dev->quirks->spurious_8042) - dev->disable_8042_wakeup = true; + dev->disable_8042_wakeup = dev->quirks->spurious_8042; } diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index 0b9b23eb7c2c..bd318fd02ccf 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -530,19 +530,6 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev) { struct device *d; - int rc; - - /* cezanne platform firmware has a fix in 64.66.0 */ - if (pdev->cpu_id == AMD_CPU_ID_CZN) { - if (!pdev->major) { - rc = amd_pmc_get_smu_version(pdev); - if (rc) - return rc; - } - - if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65)) - return 0; - } d = bus_find_device_by_name(&serio_bus, NULL, "serio0"); if (!d) diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index 01c72b91a50d..444786102f02 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -39,6 +39,7 @@ struct token_sysfs_data { struct smbios_device { struct list_head list; struct device *device; + int priority; int (*call_fn)(struct calling_interface_buffer *arg); }; @@ -145,7 +146,7 @@ int dell_smbios_error(int value) } EXPORT_SYMBOL_GPL(dell_smbios_error); -int dell_smbios_register_device(struct device *d, void *call_fn) +int dell_smbios_register_device(struct device *d, int priority, void *call_fn) { struct smbios_device *priv; @@ -154,6 +155,7 @@ int dell_smbios_register_device(struct device *d, void *call_fn) return -ENOMEM; get_device(d); priv->device = d; + priv->priority = priority; priv->call_fn = call_fn; mutex_lock(&smbios_mutex); list_add_tail(&priv->list, &smbios_device_list); @@ -292,28 +294,25 @@ EXPORT_SYMBOL_GPL(dell_smbios_call_filter); int dell_smbios_call(struct calling_interface_buffer *buffer) { - int (*call_fn)(struct calling_interface_buffer *) = NULL; - struct device *selected_dev = NULL; + struct smbios_device *selected = NULL; struct smbios_device *priv; int ret; mutex_lock(&smbios_mutex); list_for_each_entry(priv, &smbios_device_list, list) { - if (!selected_dev || priv->device->id >= selected_dev->id) { - dev_dbg(priv->device, "Trying device ID: %d\n", - priv->device->id); - call_fn = priv->call_fn; - selected_dev = priv->device; + if (!selected || priv->priority >= selected->priority) { + dev_dbg(priv->device, "Trying device ID: %d\n", priv->priority); + selected = priv; } } - if (!selected_dev) { + if (!selected) { ret = -ENODEV; pr_err("No dell-smbios drivers are loaded\n"); goto out_smbios_call; } - ret = call_fn(buffer); + ret = selected->call_fn(buffer); out_smbios_call: mutex_unlock(&smbios_mutex); diff --git a/drivers/platform/x86/dell/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c index 4d375985c85f..7055e2c40f34 100644 --- a/drivers/platform/x86/dell/dell-smbios-smm.c +++ b/drivers/platform/x86/dell/dell-smbios-smm.c @@ -125,8 +125,7 @@ int init_dell_smbios_smm(void) if 
(ret) goto fail_platform_device_add; - ret = dell_smbios_register_device(&platform_device->dev, - &dell_smbios_smm_call); + ret = dell_smbios_register_device(&platform_device->dev, 0, &dell_smbios_smm_call); if (ret) goto fail_register; diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c index ae9012549560..a7dca8c59d60 100644 --- a/drivers/platform/x86/dell/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell/dell-smbios-wmi.c @@ -264,9 +264,7 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context) if (ret) return ret; - /* ID is used by dell-smbios to set priority of drivers */ - wdev->dev.id = 1; - ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call); + ret = dell_smbios_register_device(&wdev->dev, 1, &dell_smbios_wmi_call); if (ret) return ret; diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h index 77baa15eb523..f421b8533a9e 100644 --- a/drivers/platform/x86/dell/dell-smbios.h +++ b/drivers/platform/x86/dell/dell-smbios.h @@ -64,7 +64,7 @@ struct calling_interface_structure { struct calling_interface_token tokens[]; } __packed; -int dell_smbios_register_device(struct device *d, void *call_fn); +int dell_smbios_register_device(struct device *d, int priority, void *call_fn); void dell_smbios_unregister_device(struct device *d); int dell_smbios_error(int value); diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index db5fdee2109c..60c8ac8d902c 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -92,9 +92,9 @@ static const char * const victus_thermal_profile_boards[] = { "8A25" }; -/* DMI Board names of Victus 16-s1000 laptops */ +/* DMI Board names of Victus 16-r1000 and Victus 16-s1000 laptops */ static const char * const victus_s_thermal_profile_boards[] = { - "8C9C" + "8C99", "8C9C" }; enum hp_wmi_radio { diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 6df55c8e16b7..bfcf92aa4d69 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -192,9 +192,14 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index) { struct tpmi_uncore_cluster_info *cluster_info; + struct tpmi_uncore_struct *uncore_root; u64 control; cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data); + uncore_root = cluster_info->uncore_root; + + if (uncore_root->write_blocked) + return -EPERM; if (cluster_info->root_domain) return -ENODATA; diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index a6aad743c282..b352df4cd3f9 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -24,6 +24,11 @@ #define PTP_DEFAULT_MAX_VCLOCKS 20 #define PTP_MAX_CHANNELS 2048 +enum { + PTP_LOCK_PHYSICAL = 0, + PTP_LOCK_VIRTUAL, +}; + struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; int head; diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index 2fdeedd60e21..64c950456517 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp) return PTP_VCLOCK_REFRESH_INTERVAL; } +static void ptp_vclock_set_subclass(struct ptp_clock 
*ptp) +{ + lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL); +} + static const struct ptp_clock_info ptp_vclock_info = { .owner = THIS_MODULE, .name = "ptp virtual clock", @@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) return NULL; } + ptp_vclock_set_subclass(vclock->clock); + timecounter_init(&vclock->tc, &vclock->cc, 0); ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index feadb21a8f30..4be270f4d6c3 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -40,7 +40,6 @@ struct pca9450 { struct device *dev; struct regmap *regmap; struct gpio_desc *sd_vsel_gpio; - struct notifier_block restart_nb; enum pca9450_chip_type type; unsigned int rcnt; int irq; @@ -1100,10 +1099,9 @@ static irqreturn_t pca9450_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int pca9450_i2c_restart_handler(struct notifier_block *nb, - unsigned long action, void *data) +static int pca9450_i2c_restart_handler(struct sys_off_data *data) { - struct pca9450 *pca9450 = container_of(nb, struct pca9450, restart_nb); + struct pca9450 *pca9450 = data->cb_data; struct i2c_client *i2c = container_of(pca9450->dev, struct i2c_client, dev); dev_dbg(&i2c->dev, "Restarting device..\n"); @@ -1261,10 +1259,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) pca9450->sd_vsel_fixed_low = of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low"); - pca9450->restart_nb.notifier_call = pca9450_i2c_restart_handler; - pca9450->restart_nb.priority = PCA9450_RESTART_HANDLER_PRIORITY; - - if (register_restart_handler(&pca9450->restart_nb)) + if (devm_register_sys_off_handler(&i2c->dev, SYS_OFF_MODE_RESTART, + PCA9450_RESTART_HANDLER_PRIORITY, + pca9450_i2c_restart_handler, pca9450)) dev_warn(&i2c->dev, "Failed to register restart handler\n"); dev_info(&i2c->dev, "%s probed.\n", diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c index 5e67fdc88f49..d77ca486879f 100644 --- a/drivers/regulator/tps65219-regulator.c +++ b/drivers/regulator/tps65219-regulator.c @@ -454,9 +454,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev) irq_type->irq_name, irq_data); if (error) - return dev_err_probe(tps->dev, PTR_ERR(rdev), - "Failed to request %s IRQ %d: %d\n", - irq_type->irq_name, irq, error); + return dev_err_probe(tps->dev, error, + "Failed to request %s IRQ %d\n", + irq_type->irq_name, irq); } for (i = 0; i < pmic->dev_irq_size; ++i) { @@ -477,9 +477,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev) irq_type->irq_name, irq_data); if (error) - return dev_err_probe(tps->dev, PTR_ERR(rdev), - "Failed to request %s IRQ %d: %d\n", - irq_type->irq_name, irq, error); + return dev_err_probe(tps->dev, error, + "Failed to request %s IRQ %d\n", + irq_type->irq_name, irq); } return 0; diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f2e42c1d51aa..98e334724a62 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -77,6 +77,13 @@ unsigned long sclp_console_full; /* The currently active SCLP command word. 
*/ static sclp_cmdw_t active_cmd; +static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) +{ + if (sccb_int) + return __va(sccb_int); + return NULL; +} + static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) { struct sclp_trace_entry e; @@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb) static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) { - struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int); + struct sccb_header *sccb = sclpint_to_sccb(sccb_int); struct evbuf_header *evbuf; u16 response; @@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code, /* INT: Interrupt received (a=intparm, b=cmd) */ sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, - (struct sccb_header *)__va(finished_sccb), + sclpint_to_sccb(finished_sccb), !ok_response(finished_sccb, active_cmd)); if (finished_sccb) { diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index c2fdc6553e62..1199d701c3f5 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -323,8 +323,6 @@ enum fnic_state { FNIC_IN_ETH_TRANS_FC_MODE, }; -struct mempool; - enum fnic_role_e { FNIC_ROLE_FCP_INITIATOR = 0, }; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 2db8d9529b8f..7c4d7bb3a56f 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -6280,7 +6280,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } phba->nvmeio_trc_on = 1; phba->nvmeio_trc_output_idx = 0; - phba->nvmeio_trc = NULL; } else { nvmeio_off: phba->nvmeio_trc_size = 0; diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 2797aa75a689..aff6c9d5e7c2 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -666,7 +666,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) * Take early refcount for outstanding I/O requests we schedule during * delete processing for unreg_vpi. Always keep this before * scsi_remove_host() as we can no longer obtain a reference through - * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. + * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL. 
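*/

The qla4xxx hunk below translates an ERR_PTR()-style return into the NULL convention its caller expects; mixing the two conventions is a classic bug because an IS_ERR() pointer is non-NULL yet must never be dereferenced. A small illustration of handling that boundary (hypothetical foo_* names, not the driver's API):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int id; };

	/* Callee convention: ERR_PTR(-errno) on failure. */
	static struct foo *foo_connect(int id)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);
		f->id = id;
		return f;
	}

	/* Caller convention: NULL on failure, never an ERR_PTR value. */
	static struct foo *foo_get(int id)
	{
		struct foo *f = foo_connect(id);

		return IS_ERR(f) ? NULL : f;
	}

/*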
*/ if (!scsi_host_get(shost)) return VPORT_INVAL; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index a39f1da4ce47..a761c0aa5127 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); vfree(dst_addr); + if (IS_ERR(ep)) + return NULL; return ep; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0847767d4d43..353cb60e1abe 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2674,8 +2674,10 @@ static int resp_rsup_tmfs(struct scsi_cmnd *scp, static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) { /* Read-Write Error Recovery page for mode_sense */ - unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, - 5, 0, 0xff, 0xff}; + static const unsigned char err_recov_pg[] = { + 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, + 5, 0, 0xff, 0xff + }; memcpy(p, err_recov_pg, sizeof(err_recov_pg)); if (1 == pcontrol) @@ -2685,8 +2687,10 @@ static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) { /* Disconnect-Reconnect page for mode_sense */ - unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; + static const unsigned char disconnect_pg[] = { + 0x2, 0xe, 128, 128, 0, 10, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 + }; memcpy(p, disconnect_pg, sizeof(disconnect_pg)); if (1 == pcontrol) @@ -2696,9 +2700,11 @@ static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) static int resp_format_pg(unsigned char *p, int pcontrol, int target) { /* Format device page for mode_sense */ - unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0x40, 0, 0, 0}; + static const unsigned char format_pg[] = { + 0x3, 0x16, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0x40, 0, 0, 0 + }; memcpy(p, format_pg, sizeof(format_pg)); put_unaligned_be16(sdebug_sectors_per, p + 10); @@ -2716,10 +2722,14 @@ static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, static int resp_caching_pg(unsigned char *p, int pcontrol, int target) { /* Caching page for mode_sense */ - unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, - 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; + static const unsigned char ch_caching_pg[] = { + /* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + static const unsigned char d_caching_pg[] = { + 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0 + }; if (SDEBUG_OPT_N_WCE & sdebug_opts) caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ @@ -2738,8 +2748,10 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target) { /* Control mode page for mode_sense */ unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, - 0, 0, 0x2, 0x4b}; + static const unsigned char d_ctrl_m_pg[] = { + 0xa, 10, 2, 0, 0, 0, 0, 0, + 0, 0, 0x2, 0x4b + }; if (sdebug_dsense) ctrl_m_pg[2] |= 0x4; @@ -2794,10 +2806,14 @@ static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target) static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) { /* 
Informational Exceptions control mode page for mode_sense */ - unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; - unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; + static const unsigned char ch_iec_m_pg[] = { + /* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, + 0, 0, 0x0, 0x0 + }; + static const unsigned char d_iec_m_pg[] = { + 0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, + 0, 0, 0x0, 0x0 + }; memcpy(p, iec_m_pg, sizeof(iec_m_pg)); if (1 == pcontrol) @@ -2809,8 +2825,9 @@ static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target) { /* SAS SSP mode page - short format for mode_sense */ - unsigned char sas_sf_m_pg[] = {0x19, 0x6, - 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0}; + static const unsigned char sas_sf_m_pg[] = { + 0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0 + }; memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg)); if (1 == pcontrol) @@ -2854,9 +2871,10 @@ static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target, static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol) { /* SAS SSP shared protocol specific port mode subpage */ - unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - }; + static const unsigned char sas_sha_m_pg[] = { + 0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }; memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg)); if (1 == pcontrol) @@ -2923,8 +2941,10 @@ static int process_medium_part_m_pg(struct sdebug_dev_info *devip, static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target, unsigned char dce) { /* Compression page for mode_sense (tape) */ - unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 00, 00}; + static const unsigned char compression_pg[] = { + 0x0f, 14, 0x40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; memcpy(p, compression_pg, sizeof(compression_pg)); if (dce) @@ -3282,9 +3302,10 @@ bad_pcode: static int resp_temp_l_pg(unsigned char *arr) { - unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, - 0x0, 0x1, 0x3, 0x2, 0x0, 65, - }; + static const unsigned char temp_l_pg[] = { + 0x0, 0x0, 0x3, 0x2, 0x0, 38, + 0x0, 0x1, 0x3, 0x2, 0x0, 65, + }; memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); return sizeof(temp_l_pg); @@ -3292,8 +3313,9 @@ static int resp_temp_l_pg(unsigned char *arr) static int resp_ie_l_pg(unsigned char *arr) { - unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, - }; + static const unsigned char ie_l_pg[] = { + 0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, + }; memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); if (iec_m_pg[2] & 0x4) { /* TEST bit set */ @@ -3305,11 +3327,12 @@ static int resp_ie_l_pg(unsigned char *arr) static int resp_env_rep_l_spg(unsigned char *arr) { - unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8, - 0x0, 40, 72, 0xff, 45, 18, 0, 0, - 0x1, 0x0, 0x23, 0x8, - 0x0, 55, 72, 35, 55, 45, 0, 0, - }; + static const unsigned char env_rep_l_spg[] = { + 0x0, 0x0, 0x23, 0x8, + 0x0, 40, 72, 0xff, 45, 18, 0, 0, + 0x1, 0x0, 0x23, 0x8, + 0x0, 55, 72, 35, 55, 45, 0, 0, + }; memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg)); return sizeof(env_rep_l_spg); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 169af7d47ce7..15ba493d2138 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr, return show_shost_mode(supported_mode, buf); } -static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, 
show_shost_supported_mode, NULL); +static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL); static ssize_t show_shost_active_mode(struct device *dev, @@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev, return show_shost_mode(shost->active_mode, buf); } -static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); +static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL); static int check_reset_type(const char *str) { diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 2a5f24ee858c..034a2a535a1e 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1232,7 +1232,7 @@ err: } static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, - struct device_node *np, bool off) + struct device_node *np) { struct device *dev = pg->pmc->dev; int err; @@ -1247,22 +1247,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, err = reset_control_acquire(pg->reset); if (err < 0) { pr_err("failed to acquire resets: %d\n", err); - goto out; - } - - if (off) { - err = reset_control_assert(pg->reset); - } else { - err = reset_control_deassert(pg->reset); - if (err < 0) - goto out; - - reset_control_release(pg->reset); - } - -out: - if (err) { - reset_control_release(pg->reset); reset_control_put(pg->reset); } @@ -1308,20 +1292,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) goto set_available; } - err = tegra_powergate_of_get_resets(pg, np, off); + err = tegra_powergate_of_get_resets(pg, np); if (err < 0) { dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err); goto remove_clks; } - if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { - if (off) - WARN_ON(tegra_powergate_power_up(pg, true)); + /* + * If the power-domain is off, then ensure the resets are asserted. + * If the power-domain is on, then power down to ensure that when it + * is turned on, the power-domain, clocks and resets are all in the + * expected state. + */ + if (off) { + err = reset_control_assert(pg->reset); + if (err) { + pr_err("failed to assert resets: %d\n", err); + goto remove_resets; + } + } else { + err = tegra_powergate_power_down(pg); + if (err) { + dev_err(dev, "failed to turn off PM domain %s: %d\n", + pg->genpd.name, err); + goto remove_resets; + } + } + /* + * If PM_GENERIC_DOMAINS is not enabled, power-on + * the domain and skip the genpd registration. 
+ */ + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + WARN_ON(tegra_powergate_power_up(pg, true)); goto remove_resets; } - err = pm_genpd_init(&pg->genpd, NULL, off); + err = pm_genpd_init(&pg->genpd, NULL, true); if (err < 0) { dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np, err); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 67d4000c3cef..313e444a34f3 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) } if (config.speed_hz > perclk_rate / 2) { - dev_err(fsl_lpspi->dev, - "per-clk should be at least two times of transfer speed"); - return -EINVAL; + div = 2; + } else { + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); } - div = DIV_ROUND_UP(perclk_rate, config.speed_hz); - for (prescale = 0; prescale <= prescale_max; prescale++) { scldiv = div / (1 << prescale) - 2; if (scldiv >= 0 && scldiv < 256) { diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index d3b7e857b377..064b99204d9a 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -265,6 +265,9 @@ static bool spi_mem_internal_supports_op(struct spi_mem *mem, */ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + /* Make sure the operation frequency is correct before going further */ + spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op); + if (spi_mem_check_op(op)) return false; @@ -577,6 +580,7 @@ EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq); * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an * operation. This helps finding the best variant * among a list of possible choices. + * @mem: the SPI memory * @op: the operation to benchmark * * Some chips have per-op frequency limitations, PCBs usually have their own diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index a8c4eb1cbde1..0ceaad7dba3c 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -210,13 +210,21 @@ static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section, struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); struct qpic_ecc *qecc = snandc->qspi->ecc; - if (section > 1) - return -ERANGE; - - oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes; - oobregion->offset = mtd->oobsize - oobregion->length; + switch (section) { + case 0: + oobregion->offset = 0; + oobregion->length = qecc->bytes * (qecc->steps - 1) + + qecc->bbm_size; + return 0; + case 1: + oobregion->offset = qecc->bytes * (qecc->steps - 1) + + qecc->bbm_size + + qecc->steps * 4; + oobregion->length = mtd->oobsize - oobregion->offset; + return 0; + } - return 0; + return -ERANGE; } static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section, @@ -1196,7 +1204,7 @@ static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | - FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + FIELD_PREP(CW_PER_PAGE_MASK, 0); cfg1 = ecc_cfg->cfg1; ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 49ab4c515156..c07c61dc4938 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -378,7 +378,7 @@ static void spi_st_remove(struct platform_device *pdev) pinctrl_pm_select_sleep_state(&pdev->dev); } -static int __maybe_unused spi_st_runtime_suspend(struct device *dev) +static int spi_st_runtime_suspend(struct device *dev) { 
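/*
 * The spi-st hunks around this function move from SET_SYSTEM_SLEEP_PM_OPS /
 * SET_RUNTIME_PM_OPS with pm_sleep_ptr() to SYSTEM_SLEEP_PM_OPS /
 * RUNTIME_PM_OPS with pm_ptr(), which is why the __maybe_unused annotations
 * can be dropped: the newer macros always reference the callbacks, so the
 * compiler considers them used even when CONFIG_PM is off, and pm_ptr()
 * compiles the ops pointer to NULL so the dead code is discarded.
 * A minimal sketch of the pattern (hypothetical foo_* driver):
 *
 *	static int foo_runtime_suspend(struct device *dev) { return 0; }
 *	static int foo_runtime_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm = {
 *		RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = pm_ptr(&foo_pm) },
 *	};
 */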
struct spi_controller *host = dev_get_drvdata(dev); struct spi_st *spi_st = spi_controller_get_devdata(host); @@ -391,7 +391,7 @@ static int __maybe_unused spi_st_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused spi_st_runtime_resume(struct device *dev) +static int spi_st_runtime_resume(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); struct spi_st *spi_st = spi_controller_get_devdata(host); @@ -428,8 +428,8 @@ static int __maybe_unused spi_st_resume(struct device *dev) } static const struct dev_pm_ops spi_st_pm = { - SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) - SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) + RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) }; static const struct of_device_id stm_spi_match[] = { @@ -441,7 +441,7 @@ MODULE_DEVICE_TABLE(of, stm_spi_match); static struct platform_driver spi_st_driver = { .driver = { .name = "spi-st", - .pm = pm_sleep_ptr(&spi_st_pm), + .pm = pm_ptr(&spi_st_pm), .of_match_table = of_match_ptr(stm_spi_match), }, .probe = spi_st_probe, diff --git a/drivers/tty/serial/8250/8250_rsa.c b/drivers/tty/serial/8250/8250_rsa.c index d34093cc03ad..12a65b79583c 100644 --- a/drivers/tty/serial/8250/8250_rsa.c +++ b/drivers/tty/serial/8250/8250_rsa.c @@ -147,7 +147,7 @@ void rsa_enable(struct uart_8250_port *up) if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) serial_out(up, UART_RSA_FRR, 0); } -EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_enable, "8250_base"); +EXPORT_SYMBOL_FOR_MODULES(rsa_enable, "8250_base"); /* * Attempts to turn off the RSA FIFO and resets the RSA board back to 115kbps compat mode. It is @@ -179,7 +179,7 @@ void rsa_disable(struct uart_8250_port *up) up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16; uart_port_unlock_irq(&up->port); } -EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_disable, "8250_base"); +EXPORT_SYMBOL_FOR_MODULES(rsa_disable, "8250_base"); void rsa_autoconfig(struct uart_8250_port *up) { @@ -192,7 +192,7 @@ void rsa_autoconfig(struct uart_8250_port *up) if (__rsa_enable(up)) up->port.type = PORT_RSA; } -EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_autoconfig, "8250_base"); +EXPORT_SYMBOL_FOR_MODULES(rsa_autoconfig, "8250_base"); void rsa_reset(struct uart_8250_port *up) { @@ -201,7 +201,7 @@ void rsa_reset(struct uart_8250_port *up) serial_out(up, UART_RSA_FRR, 0); } -EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_reset, "8250_base"); +EXPORT_SYMBOL_FOR_MODULES(rsa_reset, "8250_base"); #ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS #ifndef MODULE diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 96ad57c3144b..9a43102b2b21 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1303,7 +1303,7 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) * * Return: 0 upon success; -EBUSY upon timeout. */ -static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, +static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba, u64 wait_timeout_us) { int ret = 0; @@ -1431,7 +1431,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) down_write(&hba->clk_scaling_lock); if (!hba->clk_scaling.is_allowed || - ufshcd_wait_for_doorbell_clr(hba, timeout_us)) { + ufshcd_wait_for_pending_cmds(hba, timeout_us)) { ret = -EBUSY; up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); @@ -3199,7 +3199,8 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) } /* - * Return: 0 upon success; < 0 upon failure. 
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) @@ -3275,7 +3276,6 @@ retry: } } - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3294,7 +3294,8 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba) } /* - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, const u32 tag, int timeout) @@ -3317,7 +3318,8 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, * @cmd_type: specifies the type (NOP, Query...) * @timeout: timeout in milliseconds * - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. * * NOTE: Since there is only one available tag for device management commands, * it is expected you hold the hba->dev_cmd.lock mutex. @@ -3363,6 +3365,10 @@ static inline void ufshcd_init_query(struct ufs_hba *hba, (*request)->upiu_req.selector = selector; } +/* + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. + */ static int ufshcd_query_flag_retry(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) { @@ -3383,7 +3389,6 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", __func__, opcode, idn, ret, retries); - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3395,7 +3400,8 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, * @index: flag index to access * @flag_res: the flag value after the query request completes * - * Return: 0 for success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) @@ -3451,7 +3457,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3464,8 +3469,9 @@ out_unlock: * @selector: selector field * @attr_val: the attribute value after the query request completes * - * Return: 0 upon success; < 0 upon failure. -*/ + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. + */ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) { @@ -3513,7 +3519,6 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3528,8 +3533,9 @@ out_unlock: * @attr_val: the attribute value after the query request * completes * - * Return: 0 for success; < 0 upon failure. -*/ + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. 
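*/

The rewritten Return: comments through this region document a three-way convention: 0 on success, a positive value when the UFS device itself reports an OCS error, and a negative errno for any other failure; the WARN_ONCE(ret > 0, ...) checks go away because positive values are now legitimate. A sketch of how a caller might consume the convention (hypothetical send_dev_cmd() wrapper, not the driver's API):

	#include <linux/errno.h>

	extern int send_dev_cmd(void);	/* hypothetical: 0, >0 (OCS) or -errno */

	static int foo_request(void)
	{
		int rc = send_dev_cmd();

		if (rc < 0)
			return rc;	/* transport failure: propagate errno */
		if (rc > 0)
			return -EIO;	/* device-reported OCS error */
		return 0;		/* success */
	}

/*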
+ */ int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) @@ -3551,12 +3557,12 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query attribute, idn %d, failed with error %d after %d retries\n", __func__, idn, ret, QUERY_REQ_RETRIES); - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } /* - * Return: 0 if successful; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, @@ -3615,7 +3621,6 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: hba->dev_cmd.query.descriptor = NULL; ufshcd_dev_man_unlock(hba); - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3632,7 +3637,8 @@ out_unlock: * The buf_len parameter will contain, on return, the length parameter * received on the response. * - * Return: 0 for success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, @@ -3650,7 +3656,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, break; } - WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3663,7 +3668,8 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, @@ -3730,7 +3736,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, out: if (is_kmalloc) kfree(desc_buf); - WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -4781,7 +4786,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); * * Set fDeviceInit flag and poll until device toggles it. * - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_complete_dev_init(struct ufs_hba *hba) { @@ -5135,7 +5141,8 @@ out: * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. * - * Return: 0 upon success; < 0 upon failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. 
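*/

ufshcd_query_attr_retry() just above is an instance of the bounded-retry idiom this file uses for query requests: try up to a fixed count, stop on the first non-failing result, and log the last error if every attempt failed. A generic sketch (hypothetical do_query() and MAX_RETRIES; the real code keys off QUERY_REQ_RETRIES):

	#include <linux/device.h>

	#define MAX_RETRIES	3	/* hypothetical bound */

	extern int do_query(struct device *dev);	/* hypothetical */

	static int query_with_retries(struct device *dev)
	{
		int ret = -EIO;

		for (int retries = 0; retries < MAX_RETRIES; retries++) {
			ret = do_query(dev);
			if (ret >= 0)
				break;	/* success or device-reported status */
		}
		if (ret < 0)
			dev_err(dev, "query failed with %d after %d retries\n",
				ret, MAX_RETRIES);
		return ret;
	}

/*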
*/ static int ufshcd_verify_dev_init(struct ufs_hba *hba) { @@ -5559,9 +5566,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) irqreturn_t retval = IRQ_NONE; struct uic_command *cmd; - spin_lock(hba->host->host_lock); + guard(spinlock_irqsave)(hba->host->host_lock); cmd = hba->active_uic_cmd; - if (WARN_ON_ONCE(!cmd)) + if (!cmd) goto unlock; if (ufshcd_is_auto_hibern8_error(hba, intr_status)) @@ -5586,8 +5593,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); unlock: - spin_unlock(hba->host->host_lock); - return retval; } @@ -5869,7 +5874,8 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) * as the device is allowed to manage its own way of handling background * operations. * - * Return: zero on success, non-zero on failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) { @@ -5908,7 +5914,8 @@ out: * host is idle so that BKOPS are managed effectively without any negative * impacts. * - * Return: zero on success, non-zero on failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. */ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) { @@ -6058,6 +6065,10 @@ out: __func__, err); } +/* + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. + */ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id) { struct utp_upiu_query_v4_0 *upiu_resp; @@ -6920,7 +6931,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) bool queue_eh_work = false; irqreturn_t retval = IRQ_NONE; - spin_lock(hba->host->host_lock); + guard(spinlock_irqsave)(hba->host->host_lock); hba->errors |= UFSHCD_ERROR_MASK & intr_status; if (hba->errors & INT_FATAL_ERRORS) { @@ -6979,7 +6990,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) */ hba->errors = 0; hba->uic_error = 0; - spin_unlock(hba->host->host_lock); + return retval; } @@ -7138,14 +7149,19 @@ static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba) static irqreturn_t ufshcd_intr(int irq, void *__hba) { struct ufs_hba *hba = __hba; + u32 intr_status, enabled_intr_status; /* Move interrupt handling to thread when MCQ & ESI are not enabled */ if (!hba->mcq_enabled || !hba->mcq_esi_enabled) return IRQ_WAKE_THREAD; + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); + /* Directly handle interrupts since MCQ ESI handlers does the hard job */ - return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) & - ufshcd_readl(hba, REG_INTERRUPT_ENABLE)); + return ufshcd_sl_intr(hba, enabled_intr_status); } static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) @@ -7449,7 +7465,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation * @dir: DMA direction * - * Return: zero on success, non-zero on failure. + * Return: 0 upon success; > 0 in case the UFS device reported an OCS error; + * < 0 if another error occurred. 
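*/

The interrupt-path hunks above replace open-coded spin_lock()/spin_unlock() pairs with guard(spinlock_irqsave)(), the cleanup.h scope guard that releases the lock automatically on every exit from the block; that is what allows the early returns and the removal of the explicit unlock before the final return. A minimal sketch (hypothetical counter):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(foo_lock);
	static int foo_count;

	static int foo_inc(void)
	{
		guard(spinlock_irqsave)(&foo_lock);	/* dropped on return */

		if (foo_count < 0)
			return -EINVAL;		/* no unlock label needed */
		return ++foo_count;
	}

/*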
*/ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs, @@ -10516,8 +10533,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) err = devm_add_action_or_reset(dev, ufshcd_devres_release, host); if (err) - return dev_err_probe(dev, err, - "failed to add ufshcd dealloc action\n"); + return err; host->nr_maps = HCTX_TYPE_POLL + 1; hba = shost_priv(host); diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 86ae73b89d4d..f902ce08c95a 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -818,7 +818,7 @@ static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu) unsigned int q_index; q_index = map->mq_map[cpu]; - if (q_index > nr) { + if (q_index >= nr) { dev_err(hba->dev, "hwq index %d exceed %d\n", q_index, nr); return MTK_MCQ_INVALID_IRQ; diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 76fc70503a62..9574fdc2bb0f 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -2070,17 +2070,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) return IRQ_HANDLED; } -static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi) -{ - for (struct ufs_qcom_irq *q = uqi; q->irq; q++) - devm_free_irq(q->hba->dev, q->irq, q->hba); - - platform_device_msi_free_irqs_all(uqi->hba->dev); - devm_kfree(uqi->hba->dev, uqi); -} - -DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T)) - static int ufs_qcom_config_esi(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -2095,18 +2084,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) */ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; - struct ufs_qcom_irq *qi __free(ufs_qcom_irq) = - devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); - if (!qi) - return -ENOMEM; - /* Preset so __free() has a pointer to hba in all error paths */ - qi[0].hba = hba; - ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, ufs_qcom_write_msi_msg); if (ret) { - dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); - return ret; + dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n"); + return ret; /* Continue without ESI */ + } + + struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); + + if (!qi) { + platform_device_msi_free_irqs_all(hba->dev); + return -ENOMEM; } for (int idx = 0; idx < nr_irqs; idx++) { @@ -2117,15 +2106,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler, IRQF_SHARED, "qcom-mcq-esi", qi + idx); if (ret) { - dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", + dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n", __func__, qi[idx].irq, ret); - qi[idx].irq = 0; + /* Free previously allocated IRQs */ + for (int j = 0; j < idx; j++) + devm_free_irq(hba->dev, qi[j].irq, qi + j); + platform_device_msi_free_irqs_all(hba->dev); + devm_kfree(hba->dev, qi); return ret; } } - retain_and_null_ptr(qi); - if (host->hw_ver.major >= 6) { ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), REG_UFS_CFG3); diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index b39239f641f2..b87e03777395 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -630,6 +630,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] 
= { { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index e1ec9b38f5b9..d7c2a1a3c271 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -338,7 +338,8 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) schedule_work(&ci->usb_phy->chg_work); break; case CI_HDRC_CONTROLLER_PULLUP_EVENT: - if (ci->role == CI_ROLE_GADGET) + if (ci->role == CI_ROLE_GADGET && + ci->gadget.speed == USB_SPEED_HIGH) imx_usbmisc_pullup(data->usbmisc_data, ci->gadget.connected); break; diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 3d20c5e76c6a..b1418885707c 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -1068,15 +1068,24 @@ static void usbmisc_imx7d_pullup(struct imx_usbmisc_data *data, bool on) unsigned long flags; u32 val; + if (on) + return; + spin_lock_irqsave(&usbmisc->lock, flags); val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2); - if (!on) { - val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK; - val |= MX7D_USBNC_USB_CTRL2_OPMODE(1); - val |= MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; - } else { - val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; - } + val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK; + val |= MX7D_USBNC_USB_CTRL2_OPMODE(1); + val |= MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; + writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2); + spin_unlock_irqrestore(&usbmisc->lock, flags); + + /* Last for at least 1 micro-frame to let host see disconnect signal */ + usleep_range(125, 150); + + spin_lock_irqsave(&usbmisc->lock, flags); + val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK; + val |= MX7D_USBNC_USB_CTRL2_OPMODE(0); + val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN; writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2); spin_unlock_irqrestore(&usbmisc->lock, flags); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 03771bbc6c01..9dd79769cad1 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1636,7 +1636,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); struct usb_anchor *anchor = urb->anchor; int status = urb->unlinked; - unsigned long flags; urb->hcpriv = NULL; if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && @@ -1654,14 +1653,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb) /* pass ownership to the completion handler */ urb->status = status; /* - * Only collect coverage in the softirq context and disable interrupts - * to avoid scenarios with nested remote coverage collection sections - * that KCOV does not support. - * See the comment next to kcov_remote_start_usb_softirq() for details. + * This function can be called in task context inside another remote + * coverage collection section, but kcov doesn't support that kind of + * recursion yet. Only collect coverage in softirq context for now. 
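*/

The usbmisc_imx7d_pullup() rework above is a two-phase register update: force the non-driving opmode override while holding the spinlock, drop the lock to sleep for at least one micro-frame (sleeping with a spinlock held is forbidden), then retake the lock and restore normal operation. The shape of the pattern, with hypothetical CTRL_REG/FORCE_NONDRIVING names:

	#include <linux/bits.h>
	#include <linux/delay.h>
	#include <linux/io.h>
	#include <linux/spinlock.h>

	#define CTRL_REG		0x10	/* hypothetical register */
	#define FORCE_NONDRIVING	BIT(3)	/* hypothetical field */

	struct foo_priv { spinlock_t lock; void __iomem *base; };

	static void foo_force_disconnect(struct foo_priv *priv)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&priv->lock, flags);
		val = readl(priv->base + CTRL_REG);
		val |= FORCE_NONDRIVING;	/* phase 1: force override */
		writel(val, priv->base + CTRL_REG);
		spin_unlock_irqrestore(&priv->lock, flags);

		usleep_range(125, 150);		/* may sleep: lock dropped */

		spin_lock_irqsave(&priv->lock, flags);
		val &= ~FORCE_NONDRIVING;	/* phase 2: restore mode */
		writel(val, priv->base + CTRL_REG);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

/*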
*/ - flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); + kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); urb->complete(urb); - kcov_remote_stop_softirq(flags); + kcov_remote_stop_softirq(); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); @@ -1719,10 +1717,10 @@ static void usb_giveback_urb_bh(struct work_struct *work) * @urb: urb being returned to the USB device driver. * @status: completion status code for the URB. * - * Context: atomic. The completion callback is invoked in caller's context. - * For HCDs with HCD_BH flag set, the completion callback is invoked in BH - * context (except for URBs submitted to the root hub which always complete in - * caller's context). + * Context: atomic. The completion callback is invoked either in a work queue + * (BH) context or in the caller's context, depending on whether the HCD_BH + * flag is set in the @hcd structure, except that URBs submitted to the + * root hub always complete in BH context. * * This hands the URB from HCD to its USB device driver, using its * completion function. The HCD has freed all per-urb resources @@ -2166,7 +2164,7 @@ static struct urb *request_single_step_set_feature_urb( urb->complete = usb_ehset_completion; urb->status = -EINPROGRESS; urb->actual_length = 0; - urb->transfer_flags = URB_DIR_IN; + urb->transfer_flags = URB_DIR_IN | URB_NO_TRANSFER_DMA_MAP; usb_get_urb(urb); atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); @@ -2230,9 +2228,15 @@ int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) /* Complete remaining DATA and STATUS stages using the same URB */ urb->status = -EINPROGRESS; + urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; usb_get_urb(urb); atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); + if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) { + usb_put_urb(urb); + goto out1; + } + retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0); if (!retval && !wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index ff0ff95d5cca..f5bc53875330 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -371,6 +371,7 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, /* SanDisk Corp. 
SanDisk 3.2Gen1 */ + { USB_DEVICE(0x0781, 0x5596), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, /* SanDisk Extreme 55AE */ diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 54a4ee2b90b7..39c72cb52ce7 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -41,6 +41,7 @@ #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee #define PCI_DEVICE_ID_INTEL_JSP 0x4dee +#define PCI_DEVICE_ID_INTEL_WCL 0x4d7e #define PCI_DEVICE_ID_INTEL_ADL 0x460e #define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee #define PCI_DEVICE_ID_INTEL_ADLN 0x465e @@ -431,6 +432,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, WCL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) }, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 666ac432f52d..b4229aa13f37 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -288,7 +288,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8, DWC3_TRBCTL_CONTROL_SETUP, false); ret = dwc3_ep0_start_trans(dep); - WARN_ON(ret < 0); + if (ret < 0) + dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret); + for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) { struct dwc3_ep *dwc3_ep; @@ -1061,7 +1063,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, ret = dwc3_ep0_start_trans(dep); } - WARN_ON(ret < 0); + if (ret < 0) + dev_err(dwc->dev, + "ep0 data phase start transfer failed: %d\n", ret); } static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) @@ -1078,7 +1082,12 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep) static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) { - WARN_ON(dwc3_ep0_start_control_status(dep)); + int ret; + + ret = dwc3_ep0_start_control_status(dep); + if (ret) + dev_err(dwc->dev, + "ep0 status phase start transfer failed: %d\n", ret); } static void dwc3_ep0_do_control_status(struct dwc3 *dwc, @@ -1121,7 +1130,10 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); memset(¶ms, 0, sizeof(params)); ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); - WARN_ON_ONCE(ret); + if (ret) + dev_err_ratelimited(dwc->dev, + "ep0 data phase end transfer failed: %d\n", ret); + dep->resource_index = 0; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 25db36c63951..554f997eb8c4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1772,7 +1772,11 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int dep->flags |= DWC3_EP_DELAY_STOP; return 0; } - WARN_ON_ONCE(ret); + + if (ret) + dev_err_ratelimited(dep->dwc->dev, + "end transfer failed: %d\n", ret); + dep->resource_index = 0; if (!interrupt) @@ -3777,6 +3781,15 @@ static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep, static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, const struct dwc3_event_depevt *event) { + /* + * During a device-initiated disconnect, a late xferNotReady event can + * be generated after the End Transfer command resets the event 
filter, + * but before the controller is halted. Ignore it to prevent a new + * transfer from starting. + */ + if (!dep->dwc->connected) + return; + dwc3_gadget_endpoint_frame_from_event(dep, event); /* @@ -4039,7 +4052,9 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) dep->flags &= ~DWC3_EP_STALL; ret = dwc3_send_clear_stall_ep_cmd(dep); - WARN_ON_ONCE(ret); + if (ret) + dev_err_ratelimited(dwc->dev, + "failed to clear STALL on %s\n", dep->name); } } diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 2957316fd3d0..1d3085cc9d22 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -502,6 +502,7 @@ struct tegra_xudc { struct clk_bulk_data *clks; bool device_mode; + bool current_device_mode; struct work_struct usb_role_sw_work; struct phy **usb3_phy; @@ -715,6 +716,8 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc) phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_DEVICE); + + xudc->current_device_mode = true; } static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) @@ -725,6 +728,8 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) dev_dbg(xudc->dev, "device mode off\n"); + xudc->current_device_mode = false; + connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS); reinit_completion(&xudc->disconnect_complete); @@ -4044,10 +4049,10 @@ static int __maybe_unused tegra_xudc_resume(struct device *dev) spin_lock_irqsave(&xudc->lock, flags); xudc->suspended = false; + if (xudc->device_mode != xudc->current_device_mode) + schedule_work(&xudc->usb_role_sw_work); spin_unlock_irqrestore(&xudc->lock, flags); - schedule_work(&xudc->usb_role_sw_work); - pm_runtime_enable(dev); return 0; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 92bb84f8132a..b3a59ce1b3f4 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -704,8 +704,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, if (!xhci->devs[i]) continue; - retval = xhci_disable_slot(xhci, i); - xhci_free_virt_device(xhci, i); + retval = xhci_disable_and_free_slot(xhci, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", i, retval); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 07289333a1e8..81eaad87a3d9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -865,21 +865,20 @@ free_tts: * will be manipulated by the configure endpoint, allocate device, or update * hub functions while this function is removing the TT entries from the list. 
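*/

The xhci-mem hunks below make xhci_free_virt_device() take the device pointer explicitly and clear the slot's global entries (the DCBAA slot and xhci->devs[slot_id]) only when they still refer to that very device, so a free racing with a re-enabled slot cannot wipe out a newer occupant. Reduced to its core, the defensive shape is (hypothetical table/object types):

	#include <linux/slab.h>

	struct obj { int id; };

	static void table_free_obj(struct obj **table, int id, struct obj *o)
	{
		if (!o)
			return;
		/* drop the table reference only if it is still _this_ object */
		if (table[id] == o)
			table[id] = NULL;
		kfree(o);
	}

/*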
*/ -void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) +void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, + int slot_id) { - struct xhci_virt_device *dev; int i; int old_active_eps = 0; /* Slot ID 0 is reserved */ - if (slot_id == 0 || !xhci->devs[slot_id]) + if (slot_id == 0 || !dev) return; - dev = xhci->devs[slot_id]; - - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; - if (!dev) - return; + /* If device ctx array still points to _this_ device, clear it */ + if (dev->out_ctx && + xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma)) + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; trace_xhci_free_virt_device(dev); @@ -920,8 +919,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) dev->udev->slot_id = 0; if (dev->rhub_port && dev->rhub_port->slot_id == slot_id) dev->rhub_port->slot_id = 0; - kfree(xhci->devs[slot_id]); - xhci->devs[slot_id] = NULL; + if (xhci->devs[slot_id] == dev) + xhci->devs[slot_id] = NULL; + kfree(dev); } /* @@ -962,7 +962,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i out: /* we are now at a leaf device */ xhci_debugfs_remove_slot(xhci, slot_id); - xhci_free_virt_device(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); } int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 620f8f0febb8..86df80399c9f 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -47,8 +47,9 @@ #define RENESAS_ROM_ERASE_MAGIC 0x5A65726F #define RENESAS_ROM_WRITE_MAGIC 0x53524F4D -#define RENESAS_RETRY 10000 -#define RENESAS_DELAY 10 +#define RENESAS_RETRY 50000 /* 50000 * RENESAS_DELAY ~= 500ms */ +#define RENESAS_CHIP_ERASE_RETRY 500000 /* 500000 * RENESAS_DELAY ~= 5s */ +#define RENESAS_DELAY 10 #define RENESAS_FW_NAME "renesas_usb_fw.mem" @@ -407,7 +408,7 @@ static void renesas_rom_erase(struct pci_dev *pdev) /* sleep a bit while ROM is erased */ msleep(20); - for (i = 0; i < RENESAS_RETRY; i++) { + for (i = 0; i < RENESAS_CHIP_ERASE_RETRY; i++) { retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status); status &= RENESAS_ROM_STATUS_ERASE; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index ecd757d482c5..4f8f5aab109d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1592,7 +1592,8 @@ static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *comman command->slot_id = 0; } -static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) +static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id, + u32 cmd_comp_code) { struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; @@ -1607,6 +1608,10 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ xhci_free_device_endpoint_resources(xhci, virt_dev, true); + if (cmd_comp_code == COMP_SUCCESS) { + xhci->dcbaa->dev_context_ptrs[slot_id] = 0; + xhci->devs[slot_id] = NULL; + } } static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id) @@ -1856,7 +1861,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT: - xhci_handle_cmd_disable_slot(xhci, slot_id); + xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code); break; case TRB_CONFIG_EP: if 
(!cmd->completion) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 47151ca527bf..742c23826e17 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -309,6 +309,7 @@ int xhci_enable_interrupter(struct xhci_interrupter *ir) return -EINVAL; iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IP; iman |= IMAN_IE; writel(iman, &ir->ir_set->iman); @@ -325,6 +326,7 @@ int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) return -EINVAL; iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IP; iman &= ~IMAN_IE; writel(iman, &ir->ir_set->iman); @@ -3932,8 +3934,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, * Obtaining a new device slot to inform the xHCI host that * the USB device has been reset. */ - ret = xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); if (!ret) { ret = xhci_alloc_dev(hcd, udev); if (ret == 1) @@ -4090,7 +4091,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_disable_slot(xhci, udev->slot_id); spin_lock_irqsave(&xhci->lock, flags); - xhci_free_virt_device(xhci, udev->slot_id); + xhci_free_virt_device(xhci, virt_dev, udev->slot_id); spin_unlock_irqrestore(&xhci->lock, flags); } @@ -4139,6 +4140,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) return 0; } +int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id) +{ + struct xhci_virt_device *vdev = xhci->devs[slot_id]; + int ret; + + ret = xhci_disable_slot(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); + return ret; +} + /* * Checks if we have enough host controller resources for the default control * endpoint. @@ -4245,8 +4256,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) return 1; disable_slot: - xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_and_free_slot(xhci, udev->slot_id); return 0; } @@ -4382,8 +4392,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); mutex_unlock(&xhci->mutex); - ret = xhci_disable_slot(xhci, udev->slot_id); - xhci_free_virt_device(xhci, udev->slot_id); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); if (!ret) { if (xhci_alloc_dev(hcd, udev) == 1) xhci_setup_addressable_virt_dev(xhci, udev); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index a20f4e7cd43a..85d5b964bf1e 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1791,7 +1791,7 @@ void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *), /* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); -void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); +void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, int slot_id); int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, @@ -1888,6 +1888,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); +int xhci_disable_and_free_slot(struct xhci_hcd 
*xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 7dea28c2b8ee..cb5bbb19060e 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -252,7 +252,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun, return USB_STOR_TRANSPORT_ERROR; } - residue = bcs->Residue; + residue = le32_to_cpu(bcs->Residue); if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 54f0b1c83317..dfa5276a5a43 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -934,6 +934,13 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_SANE_SENSE ), +/* Added by Maël GUERIN <mael.guerin@murena.io> */ +UNUSUAL_DEV( 0x0603, 0x8611, 0x0000, 0xffff, + "Novatek", + "NTK96550-based camera", + USB_SC_SCSI, USB_PR_BULK, NULL, + US_FL_BULK_IGNORE_TAG ), + /* * Reported by Hanno Boeck <hanno@gmx.de> * Taken from the Lycoris Kernel @@ -1494,6 +1501,28 @@ UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_WP_DETECT ), +/* + * Reported by Zenm Chen <zenmchen@gmail.com> + * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch + * the device into Wi-Fi mode. + */ +UNUSUAL_DEV( 0x0bda, 0x1a2b, 0x0000, 0xffff, + "Realtek", + "DISK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + +/* + * Reported by Zenm Chen <zenmchen@gmail.com> + * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch + * the device into Wi-Fi mode. + */ +UNUSUAL_DEV( 0x0bda, 0xa192, 0x0000, 0xffff, + "Realtek", + "DISK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, "Maxtor", "USB to SATA", diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index a4ff2403ddd6..870a71f953f6 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -1485,6 +1485,9 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id) struct fusb302_chip *chip = dev_id; unsigned long flags; + /* Disable our level triggered IRQ until our irq_work has cleared it */ + disable_irq_nosync(chip->gpio_int_n_irq); + spin_lock_irqsave(&chip->irq_lock, flags); if (chip->irq_suspended) chip->irq_while_suspended = true; @@ -1627,6 +1630,7 @@ static void fusb302_irq_work(struct work_struct *work) } done: mutex_unlock(&chip->lock); + enable_irq(chip->gpio_int_n_irq); } static int init_gpio(struct fusb302_chip *chip) @@ -1751,10 +1755,9 @@ static int fusb302_probe(struct i2c_client *client) goto destroy_workqueue; } - ret = devm_request_threaded_irq(dev, chip->gpio_int_n_irq, - NULL, fusb302_irq_intn, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, - "fsc_interrupt_int_n", chip); + ret = request_irq(chip->gpio_int_n_irq, fusb302_irq_intn, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + "fsc_interrupt_int_n", chip); if (ret < 0) { dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret); goto tcpm_unregister_port; } @@ -1779,6 +1782,7 @@ static void fusb302_remove(struct i2c_client *client) struct fusb302_chip *chip = i2c_get_clientdata(client); disable_irq_wake(chip->gpio_int_n_irq); + free_irq(chip->gpio_int_n_irq, chip); cancel_work_sync(&chip->irq_work); cancel_delayed_work_sync(&chip->bc_lvl_handler); tcpm_unregister_port(chip->tcpm_port); diff --git 
a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c index 0cdda06592fd..af8da6dc60ae 100644 --- a/drivers/usb/typec/tcpm/maxim_contaminant.c +++ b/drivers/usb/typec/tcpm/maxim_contaminant.c @@ -188,6 +188,11 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven if (ret < 0) return ret; + /* Disable low power mode */ + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL, + FIELD_PREP(CCLPMODESEL, + LOW_POWER_MODE_DISABLE)); + /* Sleep to allow comparators settle */ usleep_range(5000, 6000); ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1); @@ -324,6 +329,39 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip) return 0; } +static int max_contaminant_enable_toggling(struct max_tcpci_chip *chip) +{ + struct regmap *regmap = chip->data.regmap; + int ret; + + /* Disable dry detection if enabled. */ + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL, + FIELD_PREP(CCLPMODESEL, + LOW_POWER_MODE_DISABLE)); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, 0); + if (ret) + return ret; + + ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP | + FIELD_PREP(TCPC_ROLE_CTRL_CC1, + TCPC_ROLE_CTRL_CC_RD) | + FIELD_PREP(TCPC_ROLE_CTRL_CC2, + TCPC_ROLE_CTRL_CC_RD)); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, + TCPC_TCPC_CTRL_EN_LK4CONN_ALRT, + TCPC_TCPC_CTRL_EN_LK4CONN_ALRT); + if (ret) + return ret; + + return max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION); +} + bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce, bool *cc_handled) { @@ -340,6 +378,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect if (ret < 0) return false; + if (cc_status & TCPC_CC_STATUS_TOGGLING) { + if (chip->contaminant_state == DETECTED) + return true; + return false; + } + if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) { if (!disconnect_while_debounce) msleep(100); @@ -372,6 +416,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect max_contaminant_enable_dry_detection(chip); return true; } + + ret = max_contaminant_enable_toggling(chip); + if (ret) + dev_err(chip->dev, + "Failed to enable toggling, ret=%d", + ret); } } else if (chip->contaminant_state == DETECTED) { if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) { @@ -379,6 +429,14 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect if (chip->contaminant_state == DETECTED) { max_contaminant_enable_dry_detection(chip); return true; + } else { + ret = max_contaminant_enable_toggling(chip); + if (ret) { + dev_err(chip->dev, + "Failed to enable toggling, ret=%d", + ret); + return true; + } } } } diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h index 76270d5c2838..b33540a42a95 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim.h +++ b/drivers/usb/typec/tcpm/tcpci_maxim.h @@ -21,6 +21,7 @@ #define CCOVPDIS BIT(6) #define SBURPCTRL BIT(5) #define CCLPMODESEL GENMASK(4, 3) +#define LOW_POWER_MODE_DISABLE 0 #define ULTRA_LOW_POWER_MODE 1 #define CCRPCTRL GENMASK(2, 0) #define UA_1_SRC 1 diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index d2b3ae7113ab..b01ec99106cd 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c 
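The maxim_contaminant hunks above program multi-bit register fields via FIELD_PREP() against GENMASK()-defined masks: CCLPMODESEL spans bits 4:3, and LOW_POWER_MODE_DISABLE is the value 0 placed inside that field. A self-contained illustration of the idiom (hypothetical CTRL_MODE field):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define CTRL_MODE	GENMASK(4, 3)	/* two-bit field at bits 4:3 */
	#define MODE_DISABLE	0
	#define MODE_ULP	1

	static u32 ctrl_set_mode(u32 ctrl, u32 mode)
	{
		ctrl &= ~CTRL_MODE;			/* clear the field */
		return ctrl | FIELD_PREP(CTRL_MODE, mode); /* pack new value */
	}

regmap_update_bits(map, reg, CTRL_MODE, FIELD_PREP(CTRL_MODE, MODE_DISABLE)) is the same operation performed as a read-modify-write over regmap, which is how the hunks above use it.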
@@ -116,13 +116,11 @@ e_free: static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { + struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL; struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; - struct snp_derived_key_resp derived_key_resp = {0}; struct snp_msg_desc *mdesc = snp_dev->msg_desc; struct snp_guest_req req = {}; int rc, resp_len; - /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ - u8 buf[64 + 16]; if (!arg->req_data || !arg->resp_data) return -EINVAL; @@ -132,8 +130,9 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque * response payload. Make sure that it has enough space to cover the * authtag. */ - resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; - if (sizeof(buf) < resp_len) + resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize; + derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!derived_key_resp) return -ENOMEM; derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); @@ -149,23 +148,21 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque req.vmpck_id = mdesc->vmpck_id; req.req_buf = derived_key_req; req.req_sz = sizeof(*derived_key_req); - req.resp_buf = buf; + req.resp_buf = derived_key_resp; req.resp_sz = resp_len; req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; rc = snp_send_guest_request(mdesc, &req); arg->exitinfo2 = req.exitinfo2; - if (rc) - return rc; - - memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data)); - if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp, - sizeof(derived_key_resp))) - rc = -EFAULT; + if (!rc) { + if (copy_to_user((void __user *)arg->resp_data, derived_key_resp, + sizeof(derived_key_resp->data))) + rc = -EFAULT; + } /* The response buffer contains the sensitive data, explicitly clear it. */ - memzero_explicit(buf, sizeof(buf)); - memzero_explicit(&derived_key_resp, sizeof(derived_key_resp)); + memzero_explicit(derived_key_resp, sizeof(*derived_key_resp)); + return rc; } diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 3c9da446b85d..528682bf0c7f 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -718,26 +718,6 @@ int xs_watch_msg(struct xs_watch_event *event) return 0; } -/* - * Certain older XenBus toolstack cannot handle reading values that are - * not populated. Some Xen 3.4 installation are incapable of doing this - * so if we are running on anything older than 4 do not attempt to read - * control/platform-feature-xs_reset_watches. - */ -static bool xen_strict_xenbus_quirk(void) -{ -#ifdef CONFIG_X86 - uint32_t eax, ebx, ecx, edx, base; - - base = xen_cpuid_base(); - cpuid(base + 1, &eax, &ebx, &ecx, &edx); - - if ((eax >> 16) < 4) - return true; -#endif - return false; - -} static void xs_reset_watches(void) { int err; @@ -745,9 +725,6 @@ static void xs_reset_watches(void) if (!xen_hvm_domain() || xen_initial_domain()) return; - if (xen_strict_xenbus_quirk()) - return; - if (!xenbus_read_unsigned("control", "platform-feature-xs_reset_watches", 0)) return;
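The sev-guest hunk above combines two idioms worth noting together: the response buffer moves to the heap under __free(kfree), so it is released automatically on every return path, and memzero_explicit() wipes the key material first, since a plain memset() before a free can legally be optimized away. A condensed sketch (hypothetical fetch_response() helper):

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	extern int fetch_response(u8 *buf, size_t len);	/* hypothetical */

	static int handle_key_response(size_t resp_len)
	{
		u8 *resp __free(kfree) = kzalloc(resp_len, GFP_KERNEL);
		int rc;

		if (!resp)
			return -ENOMEM;

		rc = fetch_response(resp, resp_len);

		/* wipe sensitive bytes before the automatic kfree() */
		memzero_explicit(resp, resp_len);
		return rc;
	}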
