Diffstat (limited to 'drivers')
212 files changed, 2401 insertions, 1418 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index ba79f397c9e8..467a60235370 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -327,7 +327,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev) } if (!ret) - ivpu_info(vdev, "VPU ready message received successfully\n"); + ivpu_dbg(vdev, PM, "VPU ready message received successfully\n"); else ivpu_hw_diagnose_failure(vdev); @@ -634,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev) static struct pci_device_id ivpu_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) }, { } }; diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 9e8c075fe9ef..03b3d6532fb6 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -23,6 +23,7 @@ #define DRIVER_DATE "20230117" #define PCI_DEVICE_ID_MTL 0x7d1d +#define PCI_DEVICE_ID_ARL 0xad1d #define PCI_DEVICE_ID_LNL 0x643e #define IVPU_HW_37XX 37 @@ -165,6 +166,7 @@ static inline int ivpu_hw_gen(struct ivpu_device *vdev) { switch (ivpu_device_id(vdev)) { case PCI_DEVICE_ID_MTL: + case PCI_DEVICE_ID_ARL: return IVPU_HW_37XX; case PCI_DEVICE_ID_LNL: return IVPU_HW_40XX; diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 9827ea4d7b83..0191cf8e5964 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -220,7 +220,8 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev) if (ret) return ret; - fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC); + fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, + DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP); if (!fw->mem) { ivpu_err(vdev, "Failed to allocate firmware runtime memory\n"); return -ENOMEM; @@ -330,7 +331,7 @@ int ivpu_fw_load(struct ivpu_device *vdev) memset(start, 0, size); } - wmb(); /* Flush WC buffers after writing fw->mem */ + clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size); return 0; } @@ -432,6 +433,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params if (!ivpu_fw_is_cold_boot(vdev)) { boot_params->save_restore_ret_address = 0; vdev->pm->is_warmboot = true; + clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K); return; } @@ -493,7 +495,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); - wmb(); /* Flush WC buffers after writing bootparams */ + clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K); ivpu_fw_boot_params_print(vdev, boot_params); } diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index 6b0ceda5f253..f4130586ff1b 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -8,6 +8,8 @@ #include <drm/drm_gem.h> #include <drm/drm_mm.h> +#define DRM_IVPU_BO_NOSNOOP 0x10000000 + struct dma_buf; struct ivpu_bo_ops; struct ivpu_file_priv; @@ -83,6 +85,9 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo) static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo) { + if (bo->flags & DRM_IVPU_BO_NOSNOOP) + return false; + return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED; } diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index 00c5dbbe6847..8bdb59a45da6 100644 --- 
a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -57,8 +57,7 @@ #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) -#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \ - (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ +#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \ (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \ @@ -196,6 +195,14 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); } +static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev) +{ + if (ivpu_is_simics(vdev)) + return 0; + + return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US); +} + static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) { struct ivpu_hw_info *hw = vdev->hw; @@ -556,6 +563,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) { int ret; + ret = ivpu_wait_for_clock_own_resource_ack(vdev); + if (ret) { + ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n"); + return ret; + } + ivpu_boot_pwr_island_trickle_drive(vdev, true); ivpu_boot_pwr_island_drive(vdev, true); @@ -1046,9 +1059,6 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) if (status == 0) return IRQ_NONE; - /* Disable global interrupt before handling local buttress interrupts */ - REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); - if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) ivpu_dbg(vdev, IRQ, "FREQ_CHANGE"); @@ -1096,9 +1106,6 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) /* This must be done after interrupts are cleared at the source. 
*/ REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status); - /* Re-enable global interrupt */ - REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); - if (schedule_recovery) ivpu_pm_schedule_recovery(vdev); @@ -1110,9 +1117,14 @@ static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr) struct ivpu_device *vdev = ptr; irqreturn_t ret = IRQ_NONE; + REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); + ret |= ivpu_hw_40xx_irqv_handler(vdev, irq); ret |= ivpu_hw_40xx_irqb_handler(vdev, irq); + /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ + REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); + if (ret & IRQ_WAKE_THREAD) return IRQ_WAKE_THREAD; diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h index 5139cfe88532..ff4a5d4f5821 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h @@ -70,6 +70,8 @@ #define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0) #define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1) #define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2) +#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6) +#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7) #define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11) #define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index fa0af59e39ab..295c0d7b5039 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -209,10 +209,10 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct ivpu_ipc_rx_msg *rx_msg; int wait_ret, ret = 0; - wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq, - (IS_KTHREAD() && kthread_should_stop()) || - !list_empty(&cons->rx_msg_list), - msecs_to_jiffies(timeout_ms)); + wait_ret = wait_event_timeout(cons->rx_msg_wq, + (IS_KTHREAD() && kthread_should_stop()) || + !list_empty(&cons->rx_msg_list), + msecs_to_jiffies(timeout_ms)); if (IS_KTHREAD() && kthread_should_stop()) return -EINTR; @@ -220,9 +220,6 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, if (wait_ret == 0) return -ETIMEDOUT; - if (wait_ret < 0) - return -ERESTARTSYS; - spin_lock_irq(&cons->rx_msg_lock); rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link); if (!rx_msg) { diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 948e31f7ce6e..b411948594ff 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -2057,7 +2057,9 @@ static int acpi_video_bus_add(struct acpi_device *device) !auto_detect) acpi_video_bus_register_backlight(video); - acpi_video_bus_add_notify_handler(video); + error = acpi_video_bus_add_notify_handler(video); + if (error) + goto err_del; error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY, acpi_video_bus_notify); @@ -2067,10 +2069,11 @@ static int acpi_video_bus_add(struct acpi_device *device) return 0; err_remove: + acpi_video_bus_remove_notify_handler(video); +err_del: mutex_lock(&video_list_lock); list_del(&video->entry); mutex_unlock(&video_list_lock); - acpi_video_bus_remove_notify_handler(video); acpi_video_bus_unregister_backlight(video); err_put_video: acpi_video_bus_put_devices(video); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index f0e6738ae3c9..f96bf32cd368 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -855,7 +855,7 @@ 
static size_t sizeof_idt(struct acpi_nfit_interleave *idt) { if (idt->header.length < sizeof(*idt)) return 0; - return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); + return sizeof(*idt) + sizeof(u32) * idt->line_count; } static bool add_idt(struct acpi_nfit_desc *acpi_desc, diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index dc615ef6550a..3a34a8c425fe 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1217,8 +1217,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); state->exit_latency = lpi->wake_latency; state->target_residency = lpi->min_residency; - if (lpi->arch_flags) - state->flags |= CPUIDLE_FLAG_TIMER_STOP; + state->flags |= arch_get_idle_state_flags(lpi->arch_flags); if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH) state->flags |= CPUIDLE_FLAG_RCU_IDLE; state->enter = acpi_idle_lpi_enter; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 0072e0f9ad39..d8cc1e27a125 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1973,6 +1973,96 @@ retry: } /** + * ata_dev_power_set_standby - Set a device power mode to standby + * @dev: target device + * + * Issue a STANDBY IMMEDIATE command to set a device power mode to standby. + * For an HDD device, this spins down the disks. + * + * LOCKING: + * Kernel thread context (may sleep). + */ +void ata_dev_power_set_standby(struct ata_device *dev) +{ + unsigned long ap_flags = dev->link->ap->flags; + struct ata_taskfile tf; + unsigned int err_mask; + + /* Issue STANDBY IMMEDIATE command only if supported by the device */ + if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) + return; + + /* + * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5) + * causing some drives to spin up and down again. For these, do nothing + * if we are being called on shutdown. + */ + if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && + system_state == SYSTEM_POWER_OFF) + return; + + if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && + system_entering_hibernation()) + return; + + ata_tf_init(dev, &tf); + tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + tf.protocol = ATA_PROT_NODATA; + tf.command = ATA_CMD_STANDBYNOW1; + + ata_dev_notice(dev, "Entering standby power mode\n"); + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (err_mask) + ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n", + err_mask); +} + +/** + * ata_dev_power_set_active - Set a device power mode to active + * @dev: target device + * + * Issue a VERIFY command to enter to ensure that the device is in the + * active power mode. For a spun-down HDD (standby or idle power mode), + * the VERIFY command will complete after the disk spins up. + * + * LOCKING: + * Kernel thread context (may sleep). + */ +void ata_dev_power_set_active(struct ata_device *dev) +{ + struct ata_taskfile tf; + unsigned int err_mask; + + /* + * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only + * if supported by the device. 
+ */ + if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) + return; + + ata_tf_init(dev, &tf); + tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + tf.protocol = ATA_PROT_NODATA; + tf.command = ATA_CMD_VERIFY; + tf.nsect = 1; + if (dev->flags & ATA_DFLAG_LBA) { + tf.flags |= ATA_TFLAG_LBA; + tf.device |= ATA_LBA; + } else { + /* CHS */ + tf.lbal = 0x1; /* sect */ + } + + ata_dev_notice(dev, "Entering active power mode\n"); + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (err_mask) + ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n", + err_mask); +} + +/** * ata_read_log_page - read a specific log page * @dev: target device * @log: log to read @@ -2529,7 +2619,7 @@ static int ata_dev_config_lba(struct ata_device *dev) { const u16 *id = dev->id; const char *lba_desc; - char ncq_desc[24]; + char ncq_desc[32]; int ret; dev->flags |= ATA_DFLAG_LBA; @@ -5037,17 +5127,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, struct ata_link *link; unsigned long flags; - /* Previous resume operation might still be in - * progress. Wait for PM_PENDING to clear. + spin_lock_irqsave(ap->lock, flags); + + /* + * A previous PM operation might still be in progress. Wait for + * ATA_PFLAG_PM_PENDING to clear. */ if (ap->pflags & ATA_PFLAG_PM_PENDING) { + spin_unlock_irqrestore(ap->lock, flags); ata_port_wait_eh(ap); - WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); + spin_lock_irqsave(ap->lock, flags); } - /* request PM ops to EH */ - spin_lock_irqsave(ap->lock, flags); - + /* Request PM operation to EH */ ap->pm_mesg = mesg; ap->pflags |= ATA_PFLAG_PM_PENDING; ata_for_each_link(link, ap, HOST_FIRST) { @@ -5059,10 +5151,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, spin_unlock_irqrestore(ap->lock, flags); - if (!async) { + if (!async) ata_port_wait_eh(ap); - WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); - } } /* @@ -5078,11 +5168,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) { + /* + * We are about to suspend the port, so we do not care about + * scsi_rescan_device() calls scheduled by previous resume operations. + * The next resume will schedule the rescan again. So cancel any rescan + * that is not done yet. + */ + cancel_delayed_work_sync(&ap->scsi_rescan_task); + ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); } static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) { + /* + * We are about to suspend the port, so we do not care about + * scsi_rescan_device() calls scheduled by previous resume operations. + * The next resume will schedule the rescan again. So cancel any rescan + * that is not done yet. 
+ */ + cancel_delayed_work_sync(&ap->scsi_rescan_task); + ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); } @@ -5229,7 +5335,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume); #endif const struct device_type ata_port_type = { - .name = "ata_port", + .name = ATA_PORT_TYPE_NAME, #ifdef CONFIG_PM .pm = &ata_port_pm_ops, #endif @@ -5948,11 +6054,30 @@ static void ata_port_detach(struct ata_port *ap) struct ata_link *link; struct ata_device *dev; - /* tell EH we're leaving & flush EH */ + /* Wait for any ongoing EH */ + ata_port_wait_eh(ap); + + mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); + + /* Remove scsi devices */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ALL) { + if (dev->sdev) { + spin_unlock_irqrestore(ap->lock, flags); + scsi_remove_device(dev->sdev); + spin_lock_irqsave(ap->lock, flags); + dev->sdev = NULL; + } + } + } + + /* Tell EH to disable all devices */ ap->pflags |= ATA_PFLAG_UNLOADING; ata_port_schedule_eh(ap); + spin_unlock_irqrestore(ap->lock, flags); + mutex_unlock(&ap->scsi_scan_mutex); /* wait till EH commits suicide */ ata_port_wait_eh(ap); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 4cf4f57e57b8..5686353e442c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -147,6 +147,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), .timeouts = ata_eh_flush_timeouts }, + { .commands = CMDS(ATA_CMD_VERIFY), + .timeouts = ata_eh_reset_timeouts }, }; #undef CMDS @@ -498,7 +500,19 @@ static void ata_eh_unload(struct ata_port *ap) struct ata_device *dev; unsigned long flags; - /* Restore SControl IPM and SPD for the next driver and + /* + * Unless we are restarting, transition all enabled devices to + * standby power mode. + */ + if (system_state != SYSTEM_RESTART) { + ata_for_each_link(link, ap, PMP_FIRST) { + ata_for_each_dev(dev, link, ENABLED) + ata_dev_power_set_standby(dev); + } + } + + /* + * Restore SControl IPM and SPD for the next driver and * disable attached devices. */ ata_for_each_link(link, ap, PMP_FIRST) { @@ -684,6 +698,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) ehc->saved_xfer_mode[devno] = dev->xfer_mode; if (ata_ncq_enabled(dev)) ehc->saved_ncq_enabled |= 1 << devno; + + /* If we are resuming, wake up the device */ + if (ap->pflags & ATA_PFLAG_RESUMING) + ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; } } @@ -743,6 +761,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) /* clean up */ spin_lock_irqsave(ap->lock, flags); + ap->pflags &= ~ATA_PFLAG_RESUMING; + if (ap->pflags & ATA_PFLAG_LOADING) ap->pflags &= ~ATA_PFLAG_LOADING; else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && @@ -1218,6 +1238,13 @@ void ata_eh_detach_dev(struct ata_device *dev) struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; + /* + * If the device is still enabled, transition it to standby power mode + * (i.e. spin down HDDs). 
+ */ + if (ata_dev_enabled(dev)) + ata_dev_power_set_standby(dev); + ata_dev_disable(dev); spin_lock_irqsave(ap->lock, flags); @@ -2305,7 +2332,7 @@ static void ata_eh_link_report(struct ata_link *link) struct ata_eh_context *ehc = &link->eh_context; struct ata_queued_cmd *qc; const char *frozen, *desc; - char tries_buf[6] = ""; + char tries_buf[16] = ""; int tag, nr_failed = 0; if (ehc->i.flags & ATA_EHI_QUIET) @@ -3016,6 +3043,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, if (ehc->i.flags & ATA_EHI_DID_RESET) readid_flags |= ATA_READID_POSTRESET; + /* + * When resuming, before executing any command, make sure to + * transition the device to the active power mode. + */ + if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) { + ata_dev_power_set_active(dev); + ata_eh_done(link, dev, ATA_EH_SET_ACTIVE); + } + if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { WARN_ON(dev->class == ATA_DEV_PMP); @@ -3989,6 +4025,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) unsigned long flags; int rc = 0; struct ata_device *dev; + struct ata_link *link; /* are we suspending? */ spin_lock_irqsave(ap->lock, flags); @@ -4001,6 +4038,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap) WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); + /* Set all devices attached to the port in standby mode */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ENABLED) + ata_dev_power_set_standby(dev); + } + /* * If we have a ZPODD attached, check its zero * power ready status before the port is frozen. @@ -4083,6 +4126,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) /* update the flags */ spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); + ap->pflags |= ATA_PFLAG_RESUMING; spin_unlock_irqrestore(ap->lock, flags); } #endif /* CONFIG_PM */ diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d3f28b82c97b..a371b497035e 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1050,14 +1050,13 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) } } else { sdev->sector_size = ata_id_logical_sector_size(dev->id); + /* - * Stop the drive on suspend but do not issue START STOP UNIT - * on resume as this is not necessary and may fail: the device - * will be woken up by ata_port_pm_resume() with a port reset - * and device revalidation. + * Ask the sd driver to issue START STOP UNIT on runtime suspend + * and resume only. For system level suspend/resume, devices + * power state is handled directly by libata EH. */ - sdev->manage_start_stop = 1; - sdev->no_start_on_resume = 1; + sdev->manage_runtime_start_stop = true; } /* @@ -1090,6 +1089,42 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) } /** + * ata_scsi_slave_alloc - Early setup of SCSI device + * @sdev: SCSI device to examine + * + * This is called from scsi_alloc_sdev() when the scsi device + * associated with an ATA device is scanned on a port. + * + * LOCKING: + * Defined by SCSI layer. We don't really care. + */ + +int ata_scsi_slave_alloc(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct device_link *link; + + ata_scsi_sdev_config(sdev); + + /* + * Create a link from the ata_port device to the scsi device to ensure + * that PM does suspend/resume in the correct order: the scsi device is + * consumer (child) and the ata port the supplier (parent). 
+ */ + link = device_link_add(&sdev->sdev_gendev, &ap->tdev, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); + if (!link) { + ata_port_err(ap, "Failed to create link to scsi device %s\n", + dev_name(&sdev->sdev_gendev)); + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc); + +/** * ata_scsi_slave_config - Set SCSI device attributes * @sdev: SCSI device to examine * @@ -1105,14 +1140,11 @@ int ata_scsi_slave_config(struct scsi_device *sdev) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); - int rc = 0; - - ata_scsi_sdev_config(sdev); if (dev) - rc = ata_scsi_dev_config(sdev, dev); + return ata_scsi_dev_config(sdev, dev); - return rc; + return 0; } EXPORT_SYMBOL_GPL(ata_scsi_slave_config); @@ -1136,6 +1168,8 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) unsigned long flags; struct ata_device *dev; + device_link_remove(&sdev->sdev_gendev, &ap->tdev); + spin_lock_irqsave(ap->lock, flags); dev = __ata_scsi_find_dev(ap, sdev); if (dev && dev->sdev) { @@ -1195,7 +1229,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) } if (cdb[4] & 0x1) { - tf->nsect = 1; /* 1 sector, lba=0 */ + tf->nsect = 1; /* 1 sector, lba=0 */ if (qc->dev->flags & ATA_DFLAG_LBA) { tf->flags |= ATA_TFLAG_LBA; @@ -1211,7 +1245,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) tf->lbah = 0x0; /* cyl high */ } - tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ + tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ } else { /* Some odd clown BIOSen issue spindown on power off (ACPI S4 * or S5) causing some drives to spin up and down again. @@ -1221,7 +1255,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) goto skip; if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && - system_entering_hibernation()) + system_entering_hibernation()) goto skip; /* Issue ATA STANDBY IMMEDIATE command */ @@ -1835,6 +1869,9 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) hdr[2] = 0x7; /* claim SPC-5 version compatibility */ } + if (args->dev->flags & ATA_DFLAG_CDL) + hdr[2] = 0xd; /* claim SPC-6 version compatibility */ + memcpy(rbuf, hdr, sizeof(hdr)); memcpy(&rbuf[8], "ATA ", 8); ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); @@ -4312,7 +4349,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) break; case MAINTENANCE_IN: - if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES) + if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES) ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in); else ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); @@ -4722,7 +4759,7 @@ void ata_scsi_dev_rescan(struct work_struct *work) struct ata_link *link; struct ata_device *dev; unsigned long flags; - bool delay_rescan = false; + int ret = 0; mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); @@ -4731,37 +4768,34 @@ void ata_scsi_dev_rescan(struct work_struct *work) ata_for_each_dev(dev, link, ENABLED) { struct scsi_device *sdev = dev->sdev; + /* + * If the port was suspended before this was scheduled, + * bail out. + */ + if (ap->pflags & ATA_PFLAG_SUSPENDED) + goto unlock; + if (!sdev) continue; if (scsi_device_get(sdev)) continue; - /* - * If the rescan work was scheduled because of a resume - * event, the port is already fully resumed, but the - * SCSI device may not yet be fully resumed. 
In such - * case, executing scsi_rescan_device() may cause a - * deadlock with the PM code on device_lock(). Prevent - * this by giving up and retrying rescan after a short - * delay. - */ - delay_rescan = sdev->sdev_gendev.power.is_suspended; - if (delay_rescan) { - scsi_device_put(sdev); - break; - } - spin_unlock_irqrestore(ap->lock, flags); - scsi_rescan_device(sdev); + ret = scsi_rescan_device(sdev); scsi_device_put(sdev); spin_lock_irqsave(ap->lock, flags); + + if (ret) + goto unlock; } } +unlock: spin_unlock_irqrestore(ap->lock, flags); mutex_unlock(&ap->scsi_scan_mutex); - if (delay_rescan) + /* Reschedule with a delay if scsi_rescan_device() returned an error */ + if (ret) schedule_delayed_work(&ap->scsi_rescan_task, msecs_to_jiffies(5)); } diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index e4fb9d1b9b39..3e49a877500e 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap) put_device(dev); } +static const struct device_type ata_port_sas_type = { + .name = ATA_PORT_TYPE_NAME, +}; + /** ata_tport_add - initialize a transport ATA port structure * * @parent: parent device @@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent, struct device *dev = &ap->tdev; device_initialize(dev); - dev->type = &ata_port_type; + if (ap->flags & ATA_FLAG_SAS_HOST) + dev->type = &ata_port_sas_type; + else + dev->type = &ata_port_type; dev->parent = parent; ata_host_get(ap->host); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 6e7d352803bd..05ac80da8ebc 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -30,6 +30,8 @@ enum { ATA_DNXFER_QUIET = (1 << 31), }; +#define ATA_PORT_TYPE_NAME "ata_port" + extern atomic_t ata_print_id; extern int atapi_passthru16; extern int libata_fua; @@ -60,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags); extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, unsigned int readid_flags); extern int ata_dev_configure(struct ata_device *dev); +extern void ata_dev_power_set_standby(struct ata_device *dev); +extern void ata_dev_power_set_active(struct ata_device *dev); extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); extern unsigned int ata_dev_set_feature(struct ata_device *dev, diff --git a/drivers/ata/pata_parport/fit3.c b/drivers/ata/pata_parport/fit3.c index bad7aa920cdc..d2b81cf2e16d 100644 --- a/drivers/ata/pata_parport/fit3.c +++ b/drivers/ata/pata_parport/fit3.c @@ -9,11 +9,6 @@ * * The TD-2000 and certain older devices use a different protocol. * Try the fit2 protocol module with them. - * - * NB: The FIT adapters do not appear to support the control - * registers. So, we map ALT_STATUS to STATUS and NO-OP writes - * to the device control register - this means that IDE reset - * will not work on these devices. 
*/ #include <linux/module.h> @@ -37,8 +32,7 @@ static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val) { - if (cont == 1) - return; + regr += cont << 3; switch (pi->mode) { case 0: @@ -59,11 +53,7 @@ static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr) { int a, b; - if (cont) { - if (regr != 6) - return 0xff; - regr = 7; - } + regr += cont << 3; switch (pi->mode) { case 0: diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c index 1af64d435d3c..a7adfdcb5e27 100644 --- a/drivers/ata/pata_parport/pata_parport.c +++ b/drivers/ata/pata_parport/pata_parport.c @@ -51,6 +51,13 @@ static void pata_parport_dev_select(struct ata_port *ap, unsigned int device) ata_sff_pause(ap); } +static void pata_parport_set_devctl(struct ata_port *ap, u8 ctl) +{ + struct pi_adapter *pi = ap->host->private_data; + + pi->proto->write_regr(pi, 1, 6, ctl); +} + static bool pata_parport_devchk(struct ata_port *ap, unsigned int device) { struct pi_adapter *pi = ap->host->private_data; @@ -64,7 +71,7 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device) pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa); pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55); - pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055); + pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55); pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa); nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT); @@ -73,6 +80,72 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device) return (nsect == 0x55) && (lbal == 0xaa); } +static int pata_parport_wait_after_reset(struct ata_link *link, + unsigned int devmask, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct pi_adapter *pi = ap->host->private_data; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + int rc, ret = 0; + + ata_msleep(ap, ATA_WAIT_AFTER_RESET); + + /* always check readiness of the master device */ + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + /* + * some adapters return bogus values if master device is not + * present, so don't abort now if a slave device is present + */ + if (!dev1) + return rc; + ret = -ENODEV; + } + + /* + * if device 1 was found in ata_devchk, wait for register + * access briefly, then wait for BSY to clear. + */ + if (dev1) { + int i; + + pata_parport_dev_select(ap, 1); + + /* + * Wait for register access. Some ATAPI devices fail + * to set nsect/lbal after reset, so don't waste too + * much time on it. We're gonna wait for !BSY anyway. 
+ */ + for (i = 0; i < 2; i++) { + u8 nsect, lbal; + + nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT); + lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL); + if (nsect == 1 && lbal == 1) + break; + /* give drive a breather */ + ata_msleep(ap, 50); + } + + rc = ata_sff_wait_ready(link, deadline); + if (rc) { + if (rc != -ENODEV) + return rc; + ret = rc; + } + } + + pata_parport_dev_select(ap, 0); + if (dev1) + pata_parport_dev_select(ap, 1); + if (dev0) + pata_parport_dev_select(ap, 0); + + return ret; +} + static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) { @@ -87,7 +160,7 @@ static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask, ap->last_ctl = ap->ctl; /* wait the port to become ready */ - return ata_sff_wait_after_reset(&ap->link, devmask, deadline); + return pata_parport_wait_after_reset(&ap->link, devmask, deadline); } static int pata_parport_softreset(struct ata_link *link, unsigned int *classes, @@ -252,6 +325,7 @@ static struct ata_port_operations pata_parport_port_ops = { .hardreset = NULL, .sff_dev_select = pata_parport_dev_select, + .sff_set_devctl = pata_parport_set_devctl, .sff_check_status = pata_parport_check_status, .sff_check_altstatus = pata_parport_check_altstatus, .sff_tf_load = pata_parport_tf_load, diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index db716ffd083e..3db88bbcae0f 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, if (!rbnode) return -ENOMEM; regcache_rbtree_set_register(map, rbnode, - reg - rbnode->base_reg, value); + (reg - rbnode->base_reg) / map->reg_stride, + value); regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index df1cd0f718b8..800f131222fc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1436,8 +1436,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd) static void nbd_clear_sock_ioctl(struct nbd_device *nbd) { - blk_mark_disk_dead(nbd->disk); nbd_clear_sock(nbd); + disk_force_media_change(nbd->disk); + nbd_bdev_reset(nbd); if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, &nbd->config->runtime_flags)) nbd_config_put(nbd); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3de11f077144..a999b698b131 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev); -static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); -static int rbd_dev_header_info(struct rbd_device *rbd_dev); -static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev); +static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev, + struct rbd_image_header *header); static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u64 snap_id); static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, @@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev) RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL); } +static void rbd_image_header_cleanup(struct rbd_image_header *header) +{ + kfree(header->object_prefix); + ceph_put_snap_context(header->snapc); + kfree(header->snap_sizes); + kfree(header->snap_names); + + memset(header, 0, sizeof(*header)); +} + /* * Fill an rbd image header with information from the given format 1 * on-disk header. */ -static int rbd_header_from_disk(struct rbd_device *rbd_dev, - struct rbd_image_header_ondisk *ondisk) +static int rbd_header_from_disk(struct rbd_image_header *header, + struct rbd_image_header_ondisk *ondisk, + bool first_time) { - struct rbd_image_header *header = &rbd_dev->header; - bool first_time = header->object_prefix == NULL; struct ceph_snap_context *snapc; char *object_prefix = NULL; char *snap_names = NULL; @@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, if (first_time) { header->object_prefix = object_prefix; header->obj_order = ondisk->options.order; - rbd_init_layout(rbd_dev); - } else { - ceph_put_snap_context(header->snapc); - kfree(header->snap_names); - kfree(header->snap_sizes); } /* The remaining fields always get updated (when we refresh) */ @@ -4859,7 +4862,9 @@ out_req: * return, the rbd_dev->header field will contain up-to-date * information about the image. */ -static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) +static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev, + struct rbd_image_header *header, + bool first_time) { struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; @@ -4907,7 +4912,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) snap_count = le32_to_cpu(ondisk->snap_count); } while (snap_count != want_count); - ret = rbd_header_from_disk(rbd_dev, ondisk); + ret = rbd_header_from_disk(header, ondisk, first_time); out: kfree(ondisk); @@ -4931,39 +4936,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev) } } -static int rbd_dev_refresh(struct rbd_device *rbd_dev) -{ - u64 mapping_size; - int ret; - - down_write(&rbd_dev->header_rwsem); - mapping_size = rbd_dev->mapping.size; - - ret = rbd_dev_header_info(rbd_dev); - if (ret) - goto out; - - /* - * If there is a parent, see if it has disappeared due to the - * mapped image getting flattened. 
- */ - if (rbd_dev->parent) { - ret = rbd_dev_v2_parent_info(rbd_dev); - if (ret) - goto out; - } - - rbd_assert(!rbd_is_snap(rbd_dev)); - rbd_dev->mapping.size = rbd_dev->header.image_size; - -out: - up_write(&rbd_dev->header_rwsem); - if (!ret && mapping_size != rbd_dev->mapping.size) - rbd_dev_update_size(rbd_dev); - - return ret; -} - static const struct blk_mq_ops rbd_mq_ops = { .queue_rq = rbd_queue_rq, }; @@ -5503,17 +5475,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, return 0; } -static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) -{ - return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, - &rbd_dev->header.obj_order, - &rbd_dev->header.image_size); -} - -static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) +static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev, + char **pobject_prefix) { size_t size; void *reply_buf; + char *object_prefix; int ret; void *p; @@ -5531,16 +5498,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) goto out; p = reply_buf; - rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, - p + ret, NULL, GFP_NOIO); + object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL, + GFP_NOIO); + if (IS_ERR(object_prefix)) { + ret = PTR_ERR(object_prefix); + goto out; + } ret = 0; - if (IS_ERR(rbd_dev->header.object_prefix)) { - ret = PTR_ERR(rbd_dev->header.object_prefix); - rbd_dev->header.object_prefix = NULL; - } else { - dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); - } + *pobject_prefix = object_prefix; + dout(" object_prefix = %s\n", object_prefix); out: kfree(reply_buf); @@ -5591,13 +5558,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, return 0; } -static int rbd_dev_v2_features(struct rbd_device *rbd_dev) -{ - return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, - rbd_is_ro(rbd_dev), - &rbd_dev->header.features); -} - /* * These are generic image flags, but since they are used only for * object map, store them in rbd_dev->object_map_flags. @@ -5634,6 +5594,14 @@ struct parent_image_info { u64 overlap; }; +static void rbd_parent_info_cleanup(struct parent_image_info *pii) +{ + kfree(pii->pool_ns); + kfree(pii->image_id); + + memset(pii, 0, sizeof(*pii)); +} + /* * The caller is responsible for @pii. 
*/ @@ -5703,6 +5671,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev, if (pii->has_overlap) ceph_decode_64_safe(&p, end, pii->overlap, e_inval); + dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", + __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id, + pii->has_overlap, pii->overlap); return 0; e_inval: @@ -5741,14 +5712,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev, pii->has_overlap = true; ceph_decode_64_safe(&p, end, pii->overlap, e_inval); + dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", + __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id, + pii->has_overlap, pii->overlap); return 0; e_inval: return -EINVAL; } -static int get_parent_info(struct rbd_device *rbd_dev, - struct parent_image_info *pii) +static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev, + struct parent_image_info *pii) { struct page *req_page, *reply_page; void *p; @@ -5776,7 +5750,7 @@ static int get_parent_info(struct rbd_device *rbd_dev, return ret; } -static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) +static int rbd_dev_setup_parent(struct rbd_device *rbd_dev) { struct rbd_spec *parent_spec; struct parent_image_info pii = { 0 }; @@ -5786,37 +5760,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) if (!parent_spec) return -ENOMEM; - ret = get_parent_info(rbd_dev, &pii); + ret = rbd_dev_v2_parent_info(rbd_dev, &pii); if (ret) goto out_err; - dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", - __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, - pii.has_overlap, pii.overlap); - - if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { - /* - * Either the parent never existed, or we have - * record of it but the image got flattened so it no - * longer has a parent. When the parent of a - * layered image disappears we immediately set the - * overlap to 0. The effect of this is that all new - * requests will be treated as if the image had no - * parent. - * - * If !pii.has_overlap, the parent image spec is not - * applicable. It's there to avoid duplication in each - * snapshot record. - */ - if (rbd_dev->parent_overlap) { - rbd_dev->parent_overlap = 0; - rbd_dev_parent_put(rbd_dev); - pr_info("%s: clone image has been flattened\n", - rbd_dev->disk->disk_name); - } - + if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) goto out; /* No parent? No problem. */ - } /* The ceph file layout needs to fit pool id in 32 bits */ @@ -5828,58 +5777,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) } /* - * The parent won't change (except when the clone is - * flattened, already handled that). So we only need to - * record the parent spec we have not already done so. + * The parent won't change except when the clone is flattened, + * so we only need to record the parent image spec once. 
*/ - if (!rbd_dev->parent_spec) { - parent_spec->pool_id = pii.pool_id; - if (pii.pool_ns && *pii.pool_ns) { - parent_spec->pool_ns = pii.pool_ns; - pii.pool_ns = NULL; - } - parent_spec->image_id = pii.image_id; - pii.image_id = NULL; - parent_spec->snap_id = pii.snap_id; - - rbd_dev->parent_spec = parent_spec; - parent_spec = NULL; /* rbd_dev now owns this */ + parent_spec->pool_id = pii.pool_id; + if (pii.pool_ns && *pii.pool_ns) { + parent_spec->pool_ns = pii.pool_ns; + pii.pool_ns = NULL; } + parent_spec->image_id = pii.image_id; + pii.image_id = NULL; + parent_spec->snap_id = pii.snap_id; + + rbd_assert(!rbd_dev->parent_spec); + rbd_dev->parent_spec = parent_spec; + parent_spec = NULL; /* rbd_dev now owns this */ /* - * We always update the parent overlap. If it's zero we issue - * a warning, as we will proceed as if there was no parent. + * Record the parent overlap. If it's zero, issue a warning as + * we will proceed as if there is no parent. */ - if (!pii.overlap) { - if (parent_spec) { - /* refresh, careful to warn just once */ - if (rbd_dev->parent_overlap) - rbd_warn(rbd_dev, - "clone now standalone (overlap became 0)"); - } else { - /* initial probe */ - rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); - } - } + if (!pii.overlap) + rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); rbd_dev->parent_overlap = pii.overlap; out: ret = 0; out_err: - kfree(pii.pool_ns); - kfree(pii.image_id); + rbd_parent_info_cleanup(&pii); rbd_spec_put(parent_spec); return ret; } -static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) +static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev, + u64 *stripe_unit, u64 *stripe_count) { struct { __le64 stripe_unit; __le64 stripe_count; } __attribute__ ((packed)) striping_info_buf = { 0 }; size_t size = sizeof (striping_info_buf); - void *p; int ret; ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, @@ -5891,27 +5828,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) if (ret < size) return -ERANGE; - p = &striping_info_buf; - rbd_dev->header.stripe_unit = ceph_decode_64(&p); - rbd_dev->header.stripe_count = ceph_decode_64(&p); + *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit); + *stripe_count = le64_to_cpu(striping_info_buf.stripe_count); + dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit, + *stripe_count); + return 0; } -static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev) +static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id) { - __le64 data_pool_id; + __le64 data_pool_buf; int ret; ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, &rbd_dev->header_oloc, "get_data_pool", - NULL, 0, &data_pool_id, sizeof(data_pool_id)); + NULL, 0, &data_pool_buf, + sizeof(data_pool_buf)); + dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; - if (ret < sizeof(data_pool_id)) + if (ret < sizeof(data_pool_buf)) return -EBADMSG; - rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id); - WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL); + *data_pool_id = le64_to_cpu(data_pool_buf); + dout(" data_pool_id = %lld\n", *data_pool_id); + WARN_ON(*data_pool_id == CEPH_NOPOOL); + return 0; } @@ -6103,7 +6046,8 @@ out_err: return ret; } -static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) +static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, + struct ceph_snap_context **psnapc) { size_t size; int ret; @@ -6164,9 +6108,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) for (i = 0; i < 
snap_count; i++) snapc->snaps[i] = ceph_decode_64(&p); - ceph_put_snap_context(rbd_dev->header.snapc); - rbd_dev->header.snapc = snapc; - + *psnapc = snapc; dout(" snap context seq = %llu, snap_count = %u\n", (unsigned long long)seq, (unsigned int)snap_count); out: @@ -6215,38 +6157,42 @@ out: return snap_name; } -static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) +static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev, + struct rbd_image_header *header, + bool first_time) { - bool first_time = rbd_dev->header.object_prefix == NULL; int ret; - ret = rbd_dev_v2_image_size(rbd_dev); + ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, + first_time ? &header->obj_order : NULL, + &header->image_size); if (ret) return ret; if (first_time) { - ret = rbd_dev_v2_header_onetime(rbd_dev); + ret = rbd_dev_v2_header_onetime(rbd_dev, header); if (ret) return ret; } - ret = rbd_dev_v2_snap_context(rbd_dev); - if (ret && first_time) { - kfree(rbd_dev->header.object_prefix); - rbd_dev->header.object_prefix = NULL; - } + ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc); + if (ret) + return ret; - return ret; + return 0; } -static int rbd_dev_header_info(struct rbd_device *rbd_dev) +static int rbd_dev_header_info(struct rbd_device *rbd_dev, + struct rbd_image_header *header, + bool first_time) { rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + rbd_assert(!header->object_prefix && !header->snapc); if (rbd_dev->image_format == 1) - return rbd_dev_v1_header_info(rbd_dev); + return rbd_dev_v1_header_info(rbd_dev, header, first_time); - return rbd_dev_v2_header_info(rbd_dev); + return rbd_dev_v2_header_info(rbd_dev, header, first_time); } /* @@ -6734,60 +6680,49 @@ out: */ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) { - struct rbd_image_header *header; - rbd_dev_parent_put(rbd_dev); rbd_object_map_free(rbd_dev); rbd_dev_mapping_clear(rbd_dev); /* Free dynamic fields from the header, then zero it out */ - header = &rbd_dev->header; - ceph_put_snap_context(header->snapc); - kfree(header->snap_sizes); - kfree(header->snap_names); - kfree(header->object_prefix); - memset(header, 0, sizeof (*header)); + rbd_image_header_cleanup(&rbd_dev->header); } -static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) +static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev, + struct rbd_image_header *header) { int ret; - ret = rbd_dev_v2_object_prefix(rbd_dev); + ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix); if (ret) - goto out_err; + return ret; /* * Get the and check features for the image. Currently the * features are assumed to never change. 
*/ - ret = rbd_dev_v2_features(rbd_dev); + ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, + rbd_is_ro(rbd_dev), &header->features); if (ret) - goto out_err; + return ret; /* If the image supports fancy striping, get its parameters */ - if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { - ret = rbd_dev_v2_striping_info(rbd_dev); - if (ret < 0) - goto out_err; + if (header->features & RBD_FEATURE_STRIPINGV2) { + ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit, + &header->stripe_count); + if (ret) + return ret; } - if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) { - ret = rbd_dev_v2_data_pool(rbd_dev); + if (header->features & RBD_FEATURE_DATA_POOL) { + ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id); if (ret) - goto out_err; + return ret; } - rbd_init_layout(rbd_dev); return 0; - -out_err: - rbd_dev->header.features = 0; - kfree(rbd_dev->header.object_prefix); - rbd_dev->header.object_prefix = NULL; - return ret; } /* @@ -6982,13 +6917,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) if (!depth) down_write(&rbd_dev->header_rwsem); - ret = rbd_dev_header_info(rbd_dev); + ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true); if (ret) { if (ret == -ENOENT && !need_watch) rbd_print_dne(rbd_dev, false); goto err_out_probe; } + rbd_init_layout(rbd_dev); + /* * If this image is the one being mapped, we have pool name and * id, image name and id, and snap name - need to fill snap id. @@ -7017,7 +6954,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) } if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { - ret = rbd_dev_v2_parent_info(rbd_dev); + ret = rbd_dev_setup_parent(rbd_dev); if (ret) goto err_out_probe; } @@ -7043,6 +6980,107 @@ err_out_format: return ret; } +static void rbd_dev_update_header(struct rbd_device *rbd_dev, + struct rbd_image_header *header) +{ + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + rbd_assert(rbd_dev->header.object_prefix); /* !first_time */ + + if (rbd_dev->header.image_size != header->image_size) { + rbd_dev->header.image_size = header->image_size; + + if (!rbd_is_snap(rbd_dev)) { + rbd_dev->mapping.size = header->image_size; + rbd_dev_update_size(rbd_dev); + } + } + + ceph_put_snap_context(rbd_dev->header.snapc); + rbd_dev->header.snapc = header->snapc; + header->snapc = NULL; + + if (rbd_dev->image_format == 1) { + kfree(rbd_dev->header.snap_names); + rbd_dev->header.snap_names = header->snap_names; + header->snap_names = NULL; + + kfree(rbd_dev->header.snap_sizes); + rbd_dev->header.snap_sizes = header->snap_sizes; + header->snap_sizes = NULL; + } +} + +static void rbd_dev_update_parent(struct rbd_device *rbd_dev, + struct parent_image_info *pii) +{ + if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) { + /* + * Either the parent never existed, or we have + * record of it but the image got flattened so it no + * longer has a parent. When the parent of a + * layered image disappears we immediately set the + * overlap to 0. The effect of this is that all new + * requests will be treated as if the image had no + * parent. + * + * If !pii.has_overlap, the parent image spec is not + * applicable. It's there to avoid duplication in each + * snapshot record. + */ + if (rbd_dev->parent_overlap) { + rbd_dev->parent_overlap = 0; + rbd_dev_parent_put(rbd_dev); + pr_info("%s: clone has been flattened\n", + rbd_dev->disk->disk_name); + } + } else { + rbd_assert(rbd_dev->parent_spec); + + /* + * Update the parent overlap. 
If it became zero, issue + * a warning as we will proceed as if there is no parent. + */ + if (!pii->overlap && rbd_dev->parent_overlap) + rbd_warn(rbd_dev, + "clone has become standalone (overlap 0)"); + rbd_dev->parent_overlap = pii->overlap; + } +} + +static int rbd_dev_refresh(struct rbd_device *rbd_dev) +{ + struct rbd_image_header header = { 0 }; + struct parent_image_info pii = { 0 }; + int ret; + + dout("%s rbd_dev %p\n", __func__, rbd_dev); + + ret = rbd_dev_header_info(rbd_dev, &header, false); + if (ret) + goto out; + + /* + * If there is a parent, see if it has disappeared due to the + * mapped image getting flattened. + */ + if (rbd_dev->parent) { + ret = rbd_dev_v2_parent_info(rbd_dev, &pii); + if (ret) + goto out; + } + + down_write(&rbd_dev->header_rwsem); + rbd_dev_update_header(rbd_dev, &header); + if (rbd_dev->parent) + rbd_dev_update_parent(rbd_dev, &pii); + up_write(&rbd_dev->header_rwsem); + +out: + rbd_parent_info_cleanup(&pii); + rbd_image_header_cleanup(&header); + return ret; +} + static ssize_t do_rbd_add(const char *buf, size_t count) { struct rbd_device *rbd_dev = NULL; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 82597ab4f747..499f4809fcdf 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -4419,6 +4419,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; + hdev->shutdown = btusb_shutdown_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; hdev->cmd_timeout = btusb_qca_cmd_timeout; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index eb4e7bee1e20..d57bc066dce6 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -38,6 +38,7 @@ enum sysc_soc { SOC_2420, SOC_2430, SOC_3430, + SOC_AM35, SOC_3630, SOC_4430, SOC_4460, @@ -1097,6 +1098,11 @@ static int sysc_enable_module(struct device *dev) if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_SIDLE_ACT)) { best_mode = SYSC_IDLE_NO; + + /* Clear WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg &= ~BIT(regbits->enwkup_shift); } else { best_mode = fls(ddata->cfg.sidlemodes) - 1; if (best_mode > SYSC_IDLE_MASK) { @@ -1224,6 +1230,13 @@ set_sidle: } } + if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) { + /* Set WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg |= BIT(regbits->enwkup_shift); + } + reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; if (regbits->autoidle_shift >= 0 && @@ -1518,16 +1531,16 @@ struct sysc_revision_quirk { static const struct sysc_revision_quirk sysc_revision_quirks[] = { /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Uarts on omap4 and later */ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT 
| SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Quirks that need to be set based on the module address */ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, @@ -1862,7 +1875,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata) dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", __func__, val, irq_mask); - if (sysc_soc->soc == SOC_3430) { + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) { /* Clear DSS_SDI_CONTROL */ sysc_write(ddata, 0x44, 0); @@ -2150,8 +2163,7 @@ static int sysc_reset(struct sysc *ddata) } if (ddata->cfg.srst_udelay) - usleep_range(ddata->cfg.srst_udelay, - ddata->cfg.srst_udelay * 2); + fsleep(ddata->cfg.srst_udelay); if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); @@ -3025,6 +3037,7 @@ static void ti_sysc_idle(struct work_struct *work) static const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("OMAP242*", SOC_2420), SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("AM35*", SOC_AM35), SOC_FLAG("OMAP3[45]*", SOC_3430), SOC_FLAG("OMAP3[67]*", SOC_3630), SOC_FLAG("OMAP443*", SOC_4430), @@ -3229,7 +3242,7 @@ static int sysc_check_active_timer(struct sysc *ddata) * can be dropped if we stop supporting old beagleboard revisions * A to B4 at some point. */ - if (sysc_soc->soc == SOC_3430) + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) error = -ENXIO; else error = -EBUSY; diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c index 4eaf1b53f06b..ef4ba467e747 100644 --- a/drivers/clk/clk-si521xx.c +++ b/drivers/clk/clk-si521xx.c @@ -96,7 +96,7 @@ static int si521xx_regmap_i2c_write(void *context, unsigned int reg, unsigned int val) { struct i2c_client *i2c = context; - const u8 data[3] = { reg, 1, val }; + const u8 data[2] = { reg, val }; const int count = ARRAY_SIZE(data); int ret; @@ -146,7 +146,7 @@ static int si521xx_regmap_i2c_read(void *context, unsigned int reg, static const struct regmap_config si521xx_regmap_config = { .reg_bits = 8, .val_bits = 8, - .cache_type = REGCACHE_NONE, + .cache_type = REGCACHE_FLAT, .max_register = SI521XX_REG_DA, .rd_table = &si521xx_readable_table, .wr_table = &si521xx_writeable_table, @@ -281,9 +281,10 @@ static int si521xx_probe(struct i2c_client *client) { const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev); const struct clk_parent_data clk_parent_data = { .index = 0 }; - struct si521xx *si; + const u8 data[3] = { SI521XX_REG_BC, 1, 1 }; unsigned char name[6] = "DIFF0"; struct clk_init_data init = {}; + struct si521xx *si; int i, ret; if (!chip_info) @@ -308,7 +309,7 @@ static int si521xx_probe(struct i2c_client *client) "Failed to allocate register map\n"); /* Always read back 1 Byte via I2C */ - ret = regmap_write(si->regmap, SI521XX_REG_BC, 1); + ret = i2c_master_send(client, data, ARRAY_SIZE(data)); if (ret < 0) return ret; diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c index 7ab2447bd203..3d7de355f8f6 100644 --- a/drivers/clk/clk-versaclock3.c +++ b/drivers/clk/clk-versaclock3.c @@ -118,21 +118,21 @@ enum vc3_div { VC3_DIV5, }; -enum vc3_clk_mux { - VC3_DIFF2_MUX, - VC3_DIFF1_MUX, - VC3_SE3_MUX, - VC3_SE2_MUX, - VC3_SE1_MUX, -}; - enum vc3_clk { - VC3_DIFF2, - VC3_DIFF1, - VC3_SE3, - VC3_SE2, - VC3_SE1, VC3_REF, + VC3_SE1, + VC3_SE2, + VC3_SE3, + VC3_DIFF1, + VC3_DIFF2, +}; + +enum vc3_clk_mux { + VC3_SE1_MUX = VC3_SE1 - 1, + VC3_SE2_MUX = 
VC3_SE2 - 1, + VC3_SE3_MUX = VC3_SE3 - 1, + VC3_DIFF1_MUX = VC3_DIFF1 - 1, + VC3_DIFF2_MUX = VC3_DIFF2 - 1, }; struct vc3_clk_data { @@ -401,11 +401,10 @@ static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate, /* Determine best fractional part, which is 16 bit wide */ div_frc = rate % *parent_rate; div_frc *= BIT(16) - 1; - do_div(div_frc, *parent_rate); - vc3->div_frc = (u32)div_frc; + vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX); rate = (*parent_rate * - (vc3->div_int * VC3_2_POW_16 + div_frc) / VC3_2_POW_16); + (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16); } else { rate = *parent_rate * vc3->div_int; } @@ -897,33 +896,33 @@ static struct vc3_hw_data clk_div[] = { }; static struct vc3_hw_data clk_mux[] = { - [VC3_DIFF2_MUX] = { + [VC3_SE1_MUX] = { .data = &(struct vc3_clk_data) { - .offs = VC3_DIFF2_CTRL_REG, - .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL + .offs = VC3_SE1_DIV4_CTRL, + .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL }, .hw.init = &(struct clk_init_data){ - .name = "diff2_mux", + .name = "se1_mux", .ops = &vc3_clk_mux_ops, .parent_hws = (const struct clk_hw *[]) { - &clk_div[VC3_DIV1].hw, - &clk_div[VC3_DIV3].hw + &clk_div[VC3_DIV5].hw, + &clk_div[VC3_DIV4].hw }, .num_parents = 2, .flags = CLK_SET_RATE_PARENT } }, - [VC3_DIFF1_MUX] = { + [VC3_SE2_MUX] = { .data = &(struct vc3_clk_data) { - .offs = VC3_DIFF1_CTRL_REG, - .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL + .offs = VC3_SE2_CTRL_REG0, + .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL }, .hw.init = &(struct clk_init_data){ - .name = "diff1_mux", + .name = "se2_mux", .ops = &vc3_clk_mux_ops, .parent_hws = (const struct clk_hw *[]) { - &clk_div[VC3_DIV1].hw, - &clk_div[VC3_DIV3].hw + &clk_div[VC3_DIV5].hw, + &clk_div[VC3_DIV4].hw }, .num_parents = 2, .flags = CLK_SET_RATE_PARENT @@ -945,33 +944,33 @@ static struct vc3_hw_data clk_mux[] = { .flags = CLK_SET_RATE_PARENT } }, - [VC3_SE2_MUX] = { + [VC3_DIFF1_MUX] = { .data = &(struct vc3_clk_data) { - .offs = VC3_SE2_CTRL_REG0, - .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL + .offs = VC3_DIFF1_CTRL_REG, + .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL }, .hw.init = &(struct clk_init_data){ - .name = "se2_mux", + .name = "diff1_mux", .ops = &vc3_clk_mux_ops, .parent_hws = (const struct clk_hw *[]) { - &clk_div[VC3_DIV5].hw, - &clk_div[VC3_DIV4].hw + &clk_div[VC3_DIV1].hw, + &clk_div[VC3_DIV3].hw }, .num_parents = 2, .flags = CLK_SET_RATE_PARENT } }, - [VC3_SE1_MUX] = { + [VC3_DIFF2_MUX] = { .data = &(struct vc3_clk_data) { - .offs = VC3_SE1_DIV4_CTRL, - .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL + .offs = VC3_DIFF2_CTRL_REG, + .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL }, .hw.init = &(struct clk_init_data){ - .name = "se1_mux", + .name = "diff2_mux", .ops = &vc3_clk_mux_ops, .parent_hws = (const struct clk_hw *[]) { - &clk_div[VC3_DIV5].hw, - &clk_div[VC3_DIV4].hw + &clk_div[VC3_DIV1].hw, + &clk_div[VC3_DIV3].hw }, .num_parents = 2, .flags = CLK_SET_RATE_PARENT @@ -1110,7 +1109,7 @@ static int vc3_probe(struct i2c_client *client) name, 0, CLK_SET_RATE_PARENT, 1, 1); else clk_out[i] = devm_clk_hw_register_fixed_factor_parent_hw(dev, - name, &clk_mux[i].hw, CLK_SET_RATE_PARENT, 1, 1); + name, &clk_mux[i - 1].hw, CLK_SET_RATE_PARENT, 1, 1); if (IS_ERR(clk_out[i])) return PTR_ERR(clk_out[i]); diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c index 8f4441dd572b..9384ecc6c741 100644 --- a/drivers/clk/sprd/ums512-clk.c +++ b/drivers/clk/sprd/ums512-clk.c @@ -800,7 +800,7 @@ static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", 
uart_parents, 0x250, 0, 3, UMS512_MUX_FLAG); static const struct clk_parent_data thm_parents[] = { - { .fw_name = "ext-32m" }, + { .fw_name = "ext-32k" }, { .hw = &clk_250k.hw }, }; static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents, diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index a9f3fb448de6..7bfba0afd778 100644 --- a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw, err = tegra_bpmp_clk_transfer(clk->bpmp, &msg); if (err < 0) - return err; + return 0; return response.rate; } diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index 789193ed0386..c278d5facf7d 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -558,6 +558,9 @@ int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); } + if (!tx_chn->virq) + return -ENXIO; + return tx_chn->virq; } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 26db5b8dfc1e..749868b9e80d 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " * * - power condition * Set the power condition field in the START STOP UNIT commands sent by - * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on). + * sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or + * manage_runtime_start_stop is on). * Some disks need this to spin down or to resume properly. * * - override internal blacklist @@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) sdev->use_10_for_rw = 1; - if (sbp2_param_exclusive_login) - sdev->manage_start_stop = 1; + if (sbp2_param_exclusive_login) { + sdev->manage_system_start_stop = true; + sdev->manage_runtime_start_stop = true; + } if (sdev->type == TYPE_ROM) sdev->use_10_for_ms = 1; diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 2109cd178ff7..121f4fc903cd 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -397,6 +397,19 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg) return num_pages; } +static u8 ffa_memory_attributes_get(u32 func_id) +{ + /* + * For the memory lend or donate operation, if the receiver is a PE or + * a proxy endpoint, the owner/sender must not specify the attributes + */ + if (func_id == FFA_FN_NATIVE(MEM_LEND) || + func_id == FFA_MEM_LEND) + return 0; + + return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE; +} + static int ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, struct ffa_mem_ops_args *args) @@ -413,8 +426,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, mem_region->tag = args->tag; mem_region->flags = args->flags; mem_region->sender_id = drv_info->vm_id; - mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | - FFA_MEM_INNER_SHAREABLE; + mem_region->attributes = ffa_memory_attributes_get(func_id); ep_mem_access = &mem_region->ep_mem_access[0]; for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index c0cd556fbaae..30dedd6ebfde 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -1080,6 +1080,8 @@ static int scmi_perf_protocol_init(const struct 
scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; + pinfo->version = version; + ret = scmi_perf_attributes_get(ph, pinfo); if (ret) return ret; @@ -1104,8 +1106,6 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) if (ret) return ret; - pinfo->version = version; - return ph->set_priv(ph, pinfo); } diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c index 3dba590a2a95..508eab346fc6 100644 --- a/drivers/firmware/imx/imx-dsp.c +++ b/drivers/firmware/imx/imx-dsp.c @@ -114,6 +114,7 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc) dsp_chan->idx = i % 2; dsp_chan->ch = mbox_request_channel_byname(cl, chan_name); if (IS_ERR(dsp_chan->ch)) { + kfree(dsp_chan->name); ret = PTR_ERR(dsp_chan->ch); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request mbox chan %s ret %d\n", diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index da33bbbdacb9..58f107194fda 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset, else if (param == PIN_CONFIG_BIAS_DISABLE || param == PIN_CONFIG_BIAS_PULL_DOWN || param == PIN_CONFIG_DRIVE_STRENGTH) - return pinctrl_gpio_set_config(offset, config); + return pinctrl_gpio_set_config(chip->base + offset, config); else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN || param == PIN_CONFIG_DRIVE_OPEN_SOURCE) /* Return -ENOTSUPP to trigger emulation, as per datasheet */ diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c index 2b9b7be9b8fd..01c0fd0a9d8c 100644 --- a/drivers/gpio/gpio-pmic-eic-sprd.c +++ b/drivers/gpio/gpio-pmic-eic-sprd.c @@ -352,6 +352,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev) pmic_eic->chip.set_config = sprd_pmic_eic_set_config; pmic_eic->chip.set = sprd_pmic_eic_set; pmic_eic->chip.get = sprd_pmic_eic_get; + pmic_eic->chip.can_sleep = true; irq = &pmic_eic->chip.irq; gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 7e9f7a32d3ee..cae9661862fe 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -237,6 +237,7 @@ static bool pxa_gpio_has_pinctrl(void) switch (gpio_type) { case PXA3XX_GPIO: case MMP2_GPIO: + case MMP_GPIO: return false; default: diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index bbd9e9191199..fad979797486 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, unsigned offset, bool enabled) { struct timbgpio *tgpio = gpiochip_get_data(gpio); + unsigned long flags; u32 reg; - spin_lock(&tgpio->lock); + spin_lock_irqsave(&tgpio->lock, flags); reg = ioread32(tgpio->membase + offset); if (enabled) @@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index, reg &= ~(1 << index); iowrite32(reg, tgpio->membase + offset); - spin_unlock(&tgpio->lock); + spin_unlock_irqrestore(&tgpio->lock, flags); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 30c4f5cca02c..2b8356699f23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2093,7 +2093,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) adev->flags |= AMD_IS_PX; if (!(adev->flags & AMD_IS_APU)) { - parent = pci_upstream_bridge(adev->pdev); + parent = 
pcie_find_root_port(adev->pdev); adev->has_pr3 = parent ? pci_pr3_present(parent) : false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 9c66d98af6d8..7cd0dfaeee20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -170,6 +170,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) csum += pia[size - 1]; if (csum) { DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum); + kfree(pia); return -EIO; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index c435f7632e8e..5ee87965a078 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -157,7 +157,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -188,7 +188,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 984b52923534..e9345f6554db 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -355,7 +355,7 @@ static void dcn32_update_clocks_update_dentist( int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) @@ -401,7 +401,7 @@ static void dcn32_update_clocks_update_dentist( int32_t N; int32_t j; - if (!pipe_ctx->stream) + if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) continue; /* Virtual encoders don't have this function */ if (!stream_enc->funcs->get_fifo_cal_average_level) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 41147da54458..8bb2da13826f 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2040,6 +2040,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): *states = ATTR_STATE_SUPPORTED; break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 4bb289f9b4b8..da2860da6018 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2082,36 +2082,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context return ret; } +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; - u32 smu_pcie_arg; + uint8_t *table_member1, *table_member2; + uint32_t min_gen_speed, max_gen_speed; + uint32_t min_lane_width, max_lane_width; + uint32_t smu_pcie_arg; int ret, i; - /* PCIE gen speed and lane width override */ - if (!amdgpu_device_pcie_dynamic_switching_supported()) { - if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap) - pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1]; + GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); + GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); - if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap) - pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1]; + min_gen_speed = MAX(0, table_member1[0]); + max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); + min_gen_speed = min_gen_speed > max_gen_speed ? + max_gen_speed : min_gen_speed; + min_lane_width = MAX(1, table_member2[0]); + max_lane_width = MIN(pcie_width_cap, table_member2[1]); + min_lane_width = min_lane_width > max_lane_width ? + max_lane_width : min_lane_width; - /* Force all levels to use the same settings */ - for (i = 0; i < NUM_LINK_LEVELS; i++) { - pcie_table->pcie_gen[i] = pcie_gen_cap; - pcie_table->pcie_lane[i] = pcie_width_cap; - } + if (!amdgpu_device_pcie_dynamic_switching_supported()) { + pcie_table->pcie_gen[0] = max_gen_speed; + pcie_table->pcie_lane[0] = max_lane_width; } else { - for (i = 0; i < NUM_LINK_LEVELS; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) - pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) - pcie_table->pcie_lane[i] = pcie_width_cap; - } + pcie_table->pcie_gen[0] = min_gen_speed; + pcie_table->pcie_lane[0] = min_lane_width; } + pcie_table->pcie_gen[1] = max_gen_speed; + pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { smu_pcie_arg = (i << 16 | diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 0cb646cb04ee..d5c15292ae93 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data gpd_onemix2s = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "05/21/2018", "10/26/2018", + "03/04/2019", NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data gpd_pocket = { .width = 1200, .height = 1920, @@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* One Mix 2S (generic strings, also match on bios date) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + }, + .driver_data = (void *)&gpd_onemix2s, }, {} }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 6b6d22c19411..0ba955611dfb 100644 --- 
a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -198,7 +198,7 @@ static void flush_tlb_invalidate(struct drm_i915_gem_object *obj) for_each_gt(gt, i915, id) { if (!obj->mm.tlb[id]) - return; + continue; intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]); obj->mm.tlb[id] = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 8f1633c3fb93..73a4a4eb29e0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -100,6 +100,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, st->nents = 0; for (i = 0; i < page_count; i++) { struct folio *folio; + unsigned long nr_pages; const unsigned int shrink[] = { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, 0, @@ -150,6 +151,8 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, } } while (1); + nr_pages = min_t(unsigned long, + folio_nr_pages(folio), page_count - i); if (!i || sg->length >= max_segment || folio_pfn(folio) != next_pfn) { @@ -157,13 +160,13 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, sg = sg_next(sg); st->nents++; - sg_set_folio(sg, folio, folio_size(folio), 0); + sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0); } else { /* XXX: could overflow? */ - sg->length += folio_size(folio); + sg->length += nr_pages * PAGE_SIZE; } - next_pfn = folio_pfn(folio) + folio_nr_pages(folio); - i += folio_nr_pages(folio) - 1; + next_pfn = folio_pfn(folio) + nr_pages; + i += nr_pages - 1; /* Check that the i965g/gm workaround works. */ GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index a4ff55aa5e55..7ad36198aab2 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -271,8 +271,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70)) bit_group_0 |= PIPE_CONTROL_CCS_FLUSH; + /* + * L3 fabric flush is needed for AUX CCS invalidation + * which happens as part of pipe-control so we can + * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3 + * deals with Protected Memory which is not needed for + * AUX CCS invalidation and lead to unwanted side effects. + */ + if (mode & EMIT_FLUSH) + bit_group_1 |= PIPE_CONTROL_FLUSH_L3; + bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH; - bit_group_1 |= PIPE_CONTROL_FLUSH_L3; bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; /* Wa_1409600907:tgl,adl-p */ diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index dd0ed941441a..da21f2786b5d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -511,20 +511,31 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm, vm->clear_range(vm, vma_res->start, vma_res->vma_size); } +/* + * Reserve the top of the GuC address space for firmware images. Addresses + * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC, + * which makes for a suitable range to hold GuC/HuC firmware images if the + * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT + * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk + * of the same size anyway, which is far more than needed, to keep the logic + * in uc_fw_ggtt_offset() simple. 
+ */ +#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP) + static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) { - u64 size; + u64 offset; int ret; if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) return 0; - GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); - size = ggtt->vm.total - GUC_GGTT_TOP; + GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE); + offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE; - ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size, - GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, - PIN_NOEVICT); + ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, + GUC_TOP_RESERVE_SIZE, offset, + I915_COLOR_UNEVICTABLE, PIN_NOEVICT); if (ret) drm_dbg(&ggtt->vm.i915->drm, "Failed to reserve top of GGTT for GuC\n"); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index b5b7f2fe8c78..dc7b40e06e38 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1433,6 +1433,36 @@ static void guc_timestamp_ping(struct work_struct *wrk) int srcu, ret; /* + * Ideally the busyness worker should take a gt pm wakeref because the + * worker only needs to be active while gt is awake. However, the + * gt_park path cancels the worker synchronously and this complicates + * the flow if the worker is also running at the same time. The cancel + * waits for the worker and when the worker releases the wakeref, that + * would call gt_park and would lead to a deadlock. + * + * The resolution is to take the global pm wakeref if runtime pm is + * already active. If not, we don't need to update the busyness stats as + * the stats would already be updated when the gt was parked. + * + * Note: + * - We do not requeue the worker if we cannot take a reference to runtime + * pm since intel_guc_busyness_unpark would requeue the worker in the + * resume path. + * + * - If the gt was parked longer than time taken for GT timestamp to roll + * over, we ignore those rollovers since we don't care about tracking + * the exact GT time. We only care about roll overs when the gt is + * active and running workloads. + * + * - There is a window of time between gt_park and runtime suspend, + * where the worker may run. This is acceptable since the worker will + * not find any new data to update busyness. + */ + wakeref = intel_runtime_pm_get_if_active(>->i915->runtime_pm); + if (!wakeref) + return; + + /* * Synchronize with gt reset to make sure the worker does not * corrupt the engine/guc stats. 
NB: can't actually block waiting * for a reset to complete as the reset requires flushing out @@ -1440,10 +1470,9 @@ static void guc_timestamp_ping(struct work_struct *wrk) */ ret = intel_gt_reset_trylock(gt, &srcu); if (ret) - return; + goto err_trylock; - with_intel_runtime_pm(>->i915->runtime_pm, wakeref) - __update_guc_busyness_stats(guc); + __update_guc_busyness_stats(guc); /* adjust context stats for overflow */ xa_for_each(&guc->context_lookup, index, ce) @@ -1452,6 +1481,9 @@ static void guc_timestamp_ping(struct work_struct *wrk) intel_gt_reset_unlock(gt, srcu); guc_enable_busyness_worker(guc); + +err_trylock: + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); } static int guc_action_enable_usage_stats(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1f65bb33dd21..a8551ce322de 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1199,6 +1199,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } + /* + * Register engines early to ensure the engine list is in its final + * rb-tree form, lowering the amount of code that has to deal with + * the intermediate llist state. + */ + intel_engines_driver_register(dev_priv); + return 0; /* @@ -1246,8 +1253,6 @@ err_unlock: void i915_gem_driver_register(struct drm_i915_private *i915) { i915_gem_driver_register__shrinker(i915); - - intel_engines_driver_register(i915); } void i915_gem_driver_unregister(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 30afbec9e3b1..2edd7bb13fae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -31,6 +31,7 @@ #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_exec.h" #include "nouveau_gem.h" #include "nouveau_chan.h" #include "nouveau_abi16.h" @@ -183,6 +184,20 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16) cli->abi16 = NULL; } +static inline int +getparam_dma_ib_max(struct nvif_device *device) +{ + const struct nvif_mclass dmas[] = { + { NV03_CHANNEL_DMA, 0 }, + { NV10_CHANNEL_DMA, 0 }, + { NV17_CHANNEL_DMA, 0 }, + { NV40_CHANNEL_DMA, 0 }, + {} + }; + + return nvif_mclass(&device->object, dmas) < 0 ? 
NV50_DMA_IB_MAX : 0; +} + int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) { @@ -247,6 +262,12 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) case NOUVEAU_GETPARAM_GRAPH_UNITS: getparam->value = nvkm_gr_units(gr); break; + case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: { + int ib_max = getparam_dma_ib_max(device); + + getparam->value = nouveau_exec_push_max_from_ib_max(ib_max); + break; + } default: NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index bb3d6e5c122f..7c97b2886807 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -257,10 +257,7 @@ static int nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm, struct nouveau_channel **pchan) { - static const struct { - s32 oclass; - int version; - } hosts[] = { + const struct nvif_mclass hosts[] = { { AMPERE_CHANNEL_GPFIFO_B, 0 }, { AMPERE_CHANNEL_GPFIFO_A, 0 }, { TURING_CHANNEL_GPFIFO_A, 0 }, @@ -443,9 +440,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) } /* initialise dma tracking parameters */ - switch (chan->user.oclass & 0x00ff) { - case 0x006b: - case 0x006e: + switch (chan->user.oclass) { + case NV03_CHANNEL_DMA: + case NV10_CHANNEL_DMA: + case NV17_CHANNEL_DMA: + case NV40_CHANNEL_DMA: chan->user_put = 0x40; chan->user_get = 0x44; chan->dma.max = (0x10000 / 4) - 2; @@ -455,7 +454,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) chan->user_get = 0x44; chan->user_get_hi = 0x60; chan->dma.ib_base = 0x10000 / 4; - chan->dma.ib_max = (0x02000 / 8) - 1; + chan->dma.ib_max = NV50_DMA_IB_MAX; chan->dma.ib_put = 0; chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; chan->dma.max = chan->dma.ib_base; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 1744d95b233e..c52cda82353e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -49,6 +49,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length, /* Maximum push buffer size. */ #define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff +/* Maximum IBs per ring. */ +#define NV50_DMA_IB_MAX ((0x02000 / 8) - 1) + /* Object handles - for stuff that's doesn't use handle == oclass. 
*/ enum { NvDmaFB = 0x80000002, diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index 5dda94e1318c..c1837ba95fb5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -379,7 +379,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, struct nouveau_channel *chan = NULL; struct nouveau_exec_job_args args = {}; struct drm_nouveau_exec *req = data; - int ret = 0; + int push_max, ret = 0; if (unlikely(!abi16)) return -ENOMEM; @@ -404,9 +404,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (!chan->dma.ib_max) return nouveau_abi16_put(abi16, -ENOSYS); - if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) { + push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max); + if (unlikely(req->push_count > push_max)) { NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n", - req->push_count, NOUVEAU_GEM_MAX_PUSH); + req->push_count, push_max); return nouveau_abi16_put(abi16, -EINVAL); } diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h index 778cacd90f65..5488d337bcc0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.h +++ b/drivers/gpu/drm/nouveau/nouveau_exec.h @@ -51,4 +51,14 @@ int nouveau_exec_job_init(struct nouveau_exec_job **job, int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data, struct drm_file *file_priv); +static inline unsigned int +nouveau_exec_push_max_from_ib_max(int ib_max) +{ + /* Limit the number of IBs per job to half the size of the ring in order + * to avoid the ring running dry between submissions and preserve one + * more slot for the job's HW fence. + */ + return ib_max > 1 ? ib_max / 2 - 1 : 0; +} + #endif diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index c1dfbfcaa000..bccb33b900f3 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -118,7 +118,7 @@ void drm_kunit_helper_free_device(struct kunit *test, struct device *dev) kunit_release_action(test, kunit_action_platform_driver_unregister, - pdev); + &fake_platform_driver); } EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0cea301cc9a9..790aa908e2a7 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -799,6 +799,8 @@ config HID_NVIDIA_SHIELD tristate "NVIDIA SHIELD devices" depends on USB_HID depends on BT_HIDP + depends on LEDS_CLASS + select POWER_SUPPLY help Support for NVIDIA SHIELD accessories. 
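For reference, the exec push limit introduced in the nouveau hunks above is easy to work out by hand. The sketch below is a minimal standalone userspace illustration (not part of the patch) that mirrors NV50_DMA_IB_MAX and nouveau_exec_push_max_from_ib_max() exactly as defined above:

/*
 * Standalone sketch of the push-count limit from the nouveau changes above.
 * Mirrors NV50_DMA_IB_MAX and nouveau_exec_push_max_from_ib_max(); plain
 * userspace C for illustration only, not kernel code.
 */
#include <stdio.h>

#define NV50_DMA_IB_MAX ((0x02000 / 8) - 1)	/* 1023 IB slots per ring */

static unsigned int push_max_from_ib_max(int ib_max)
{
	/* Half the ring, minus one slot kept free for the job's HW fence. */
	return ib_max > 1 ? ib_max / 2 - 1 : 0;
}

int main(void)
{
	/* DMA channels have no IB ring (ib_max == 0), so the exec push max is 0. */
	printf("dma channel:   ib_max=%d  push_max=%u\n",
	       0, push_max_from_ib_max(0));
	/* NV50+ GPFIFO channels: 1023 IB slots -> 1023 / 2 - 1 = 510 pushes. */
	printf("nv50+ channel: ib_max=%d  push_max=%u\n",
	       NV50_DMA_IB_MAX, push_max_from_ib_max(NV50_DMA_IB_MAX));
	return 0;
}

In other words, NOUVEAU_GETPARAM_EXEC_PUSH_MAX reports 510 pushes per exec job on IB-capable channels (NV50_DMA_IB_MAX = 1023) and 0 on the legacy DMA channel classes, matching the limit enforced in nouveau_exec_ioctl_exec().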
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 403506b9697e..b346d68a06f5 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -130,6 +130,10 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, return -ENODEV; boot_hid = usb_get_intfdata(boot_interface); + if (list_empty(&boot_hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } boot_hid_input = list_first_entry(&boot_hid->inputs, struct hid_input, list); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7e499992a793..e4d2dfd5d253 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -425,6 +425,7 @@ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5 #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE +#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02 #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 0235cc1690a1..c8b20d44b147 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -409,6 +409,8 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG), + HID_BATTERY_QUIRK_IGNORE }, {} }; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 05f5b5f588a2..a209d51bd247 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -4515,7 +4515,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) goto hid_hw_init_fail; } - hidpp_connect_event(hidpp); + schedule_work(&hidpp->work); + flush_work(&hidpp->work); if (will_restart) { /* Reset the HID node state */ @@ -4677,6 +4678,8 @@ static const struct hid_device_id hidpp_devices[] = { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) }, + { /* M720 Triathlon mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) }, { /* MX Ergo trackball over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) }, diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 521b2ffb4244..8db4ae05febc 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2146,6 +2146,10 @@ static const struct hid_device_id mt_devices[] = { /* Synaptics devices */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xcd7e) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 250f5d2f888a..10468f727e5b 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -2088,7 +2088,9 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) struct joycon_input_report *report; req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO; + mutex_lock(&ctlr->output_mutex); ret = joycon_send_subcmd(ctlr, &req, 0, HZ); + mutex_unlock(&ctlr->output_mutex); if (ret) { hid_err(ctlr->hdev, "Failed to get joycon info; 
ret=%d\n", ret); return ret; @@ -2117,6 +2119,85 @@ static int joycon_read_info(struct joycon_ctlr *ctlr) return 0; } +static int joycon_init(struct hid_device *hdev) +{ + struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); + int ret = 0; + + mutex_lock(&ctlr->output_mutex); + /* if handshake command fails, assume ble pro controller */ + if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) && + !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { + hid_dbg(hdev, "detected USB controller\n"); + /* set baudrate for improved latency */ + ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); + if (ret) { + hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret); + goto out_unlock; + } + /* handshake */ + ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); + if (ret) { + hid_err(hdev, "Failed handshake; ret=%d\n", ret); + goto out_unlock; + } + /* + * Set no timeout (to keep controller in USB mode). + * This doesn't send a response, so ignore the timeout. + */ + joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); + } else if (jc_type_is_chrggrip(ctlr)) { + hid_err(hdev, "Failed charging grip handshake\n"); + ret = -ETIMEDOUT; + goto out_unlock; + } + + /* get controller calibration data, and parse it */ + ret = joycon_request_calibration(ctlr); + if (ret) { + /* + * We can function with default calibration, but it may be + * inaccurate. Provide a warning, and continue on. + */ + hid_warn(hdev, "Analog stick positions may be inaccurate\n"); + } + + /* get IMU calibration data, and parse it */ + ret = joycon_request_imu_calibration(ctlr); + if (ret) { + /* + * We can function with default calibration, but it may be + * inaccurate. Provide a warning, and continue on. + */ + hid_warn(hdev, "Unable to read IMU calibration data\n"); + } + + /* Set the reporting mode to 0x30, which is the full report mode */ + ret = joycon_set_report_mode(ctlr); + if (ret) { + hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); + goto out_unlock; + } + + /* Enable rumble */ + ret = joycon_enable_rumble(ctlr); + if (ret) { + hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); + goto out_unlock; + } + + /* Enable the IMU */ + ret = joycon_enable_imu(ctlr); + if (ret) { + hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); + goto out_unlock; + } + +out_unlock: + mutex_unlock(&ctlr->output_mutex); + return ret; +} + /* Common handler for parsing inputs */ static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data, int size) @@ -2248,85 +2329,19 @@ static int nintendo_hid_probe(struct hid_device *hdev, hid_device_io_start(hdev); - /* Initialize the controller */ - mutex_lock(&ctlr->output_mutex); - /* if handshake command fails, assume ble pro controller */ - if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) && - !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { - hid_dbg(hdev, "detected USB controller\n"); - /* set baudrate for improved latency */ - ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); - if (ret) { - hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret); - goto err_mutex; - } - /* handshake */ - ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); - if (ret) { - hid_err(hdev, "Failed handshake; ret=%d\n", ret); - goto err_mutex; - } - /* - * Set no timeout (to keep controller in USB mode). - * This doesn't send a response, so ignore the timeout. 
- */ - joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); - } else if (jc_type_is_chrggrip(ctlr)) { - hid_err(hdev, "Failed charging grip handshake\n"); - ret = -ETIMEDOUT; - goto err_mutex; - } - - /* get controller calibration data, and parse it */ - ret = joycon_request_calibration(ctlr); + ret = joycon_init(hdev); if (ret) { - /* - * We can function with default calibration, but it may be - * inaccurate. Provide a warning, and continue on. - */ - hid_warn(hdev, "Analog stick positions may be inaccurate\n"); - } - - /* get IMU calibration data, and parse it */ - ret = joycon_request_imu_calibration(ctlr); - if (ret) { - /* - * We can function with default calibration, but it may be - * inaccurate. Provide a warning, and continue on. - */ - hid_warn(hdev, "Unable to read IMU calibration data\n"); - } - - /* Set the reporting mode to 0x30, which is the full report mode */ - ret = joycon_set_report_mode(ctlr); - if (ret) { - hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); - goto err_mutex; - } - - /* Enable rumble */ - ret = joycon_enable_rumble(ctlr); - if (ret) { - hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); - goto err_mutex; - } - - /* Enable the IMU */ - ret = joycon_enable_imu(ctlr); - if (ret) { - hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); - goto err_mutex; + hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret); + goto err_close; } ret = joycon_read_info(ctlr); if (ret) { hid_err(hdev, "Failed to retrieve controller info; ret=%d\n", ret); - goto err_mutex; + goto err_close; } - mutex_unlock(&ctlr->output_mutex); - /* Initialize the leds */ ret = joycon_leds_create(ctlr); if (ret) { @@ -2352,8 +2367,6 @@ static int nintendo_hid_probe(struct hid_device *hdev, hid_dbg(hdev, "probe - success\n"); return 0; -err_mutex: - mutex_unlock(&ctlr->output_mutex); err_close: hid_hw_close(hdev); err_stop: @@ -2383,6 +2396,20 @@ static void nintendo_hid_remove(struct hid_device *hdev) hid_hw_stop(hdev); } +#ifdef CONFIG_PM + +static int nintendo_hid_resume(struct hid_device *hdev) +{ + int ret = joycon_init(hdev); + + if (ret) + hid_err(hdev, "Failed to restore controller after resume"); + + return ret; +} + +#endif + static const struct hid_device_id nintendo_hid_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON) }, @@ -2404,6 +2431,10 @@ static struct hid_driver nintendo_hid_driver = { .probe = nintendo_hid_probe, .remove = nintendo_hid_remove, .raw_event = nintendo_hid_event, + +#ifdef CONFIG_PM + .resume = nintendo_hid_resume, +#endif }; module_hid_driver(nintendo_hid_driver); diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c index 9a3576dbf421..c463e54decbc 100644 --- a/drivers/hid/hid-nvidia-shield.c +++ b/drivers/hid/hid-nvidia-shield.c @@ -801,7 +801,7 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts) led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike%d:blue:led", ts->id); led->max_brightness = 1; - led->flags = LED_CORE_SUSPENDRESUME; + led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN; led->brightness_get = &thunderstrike_led_get_brightness; led->brightness_set = &thunderstrike_led_set_brightness; @@ -1058,7 +1058,7 @@ static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT); if (ret) { hid_err(hdev, "Failed to start HID device\n"); - goto err_haptics; + goto err_ts_create; } ret = hid_hw_open(hdev); @@ -1073,9 +1073,12 @@ static int 
shield_probe(struct hid_device *hdev, const struct hid_device_id *id) err_stop: hid_hw_stop(hdev); -err_haptics: +err_ts_create: + power_supply_unregister(ts->base.battery_dev.psy); if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); + led_classdev_unregister(&ts->led_dev); + ida_free(&thunderstrike_ida, ts->id); return ret; } diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index dd942061fd77..ebc0aa4e4345 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -2155,6 +2155,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; err: + usb_free_urb(sc->ghl_urb); + hid_hw_stop(hdev); return ret; } diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c index 43d2cf7153d7..b3edadf42d6d 100644 --- a/drivers/hid/hid-steelseries.c +++ b/drivers/hid/hid-steelseries.c @@ -390,7 +390,7 @@ static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev) ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0], write_buf, sizeof(arctis_1_battery_request), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); - if (ret < sizeof(arctis_1_battery_request)) { + if (ret < (int)sizeof(arctis_1_battery_request)) { hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret); ret = -ENODATA; } diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 9601c0605fd9..2735cd585af0 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -998,45 +998,29 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid) return hid_driver_reset_resume(hid); } -/** - * __do_i2c_hid_core_initial_power_up() - First time power up of the i2c-hid device. - * @ihid: The ihid object created during probe. - * - * This function is called at probe time. - * - * The initial power on is where we do some basic validation that the device - * exists, where we fetch the HID descriptor, and where we create the actual - * HID devices. - * - * Return: 0 or error code. +/* + * Check that the device exists and parse the HID descriptor. 
*/ -static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid) +static int __i2c_hid_core_probe(struct i2c_hid *ihid) { struct i2c_client *client = ihid->client; struct hid_device *hid = ihid->hid; int ret; - ret = i2c_hid_core_power_up(ihid); - if (ret) - return ret; - /* Make sure there is something at this address */ ret = i2c_smbus_read_byte(client); if (ret < 0) { i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret); - ret = -ENXIO; - goto err; + return -ENXIO; } ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) { dev_err(&client->dev, "Failed to fetch the HID Descriptor\n"); - goto err; + return ret; } - enable_irq(client->irq); - hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); hid->product = le16_to_cpu(ihid->hdesc.wProductID); @@ -1050,17 +1034,49 @@ static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid) ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product); + return 0; +} + +static int i2c_hid_core_register_hid(struct i2c_hid *ihid) +{ + struct i2c_client *client = ihid->client; + struct hid_device *hid = ihid->hid; + int ret; + + enable_irq(client->irq); + ret = hid_add_device(hid); if (ret) { if (ret != -ENODEV) hid_err(client, "can't add hid device: %d\n", ret); - goto err; + disable_irq(client->irq); + return ret; } return 0; +} + +static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid) +{ + int ret; + + ret = i2c_hid_core_power_up(ihid); + if (ret) + return ret; -err: + ret = __i2c_hid_core_probe(ihid); + if (ret) + goto err_power_down; + + ret = i2c_hid_core_register_hid(ihid); + if (ret) + goto err_power_down; + + return 0; + +err_power_down: i2c_hid_core_power_down(ihid); + return ret; } @@ -1077,7 +1093,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work) * steps. */ if (!hid->version) - ret = __do_i2c_hid_core_initial_power_up(ihid); + ret = i2c_hid_core_probe_panel_follower(ihid); else ret = i2c_hid_core_resume(ihid); @@ -1136,7 +1152,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid) struct device *dev = &ihid->client->dev; int ret; - ihid->is_panel_follower = true; ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs; /* @@ -1156,30 +1171,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid) return 0; } -static int i2c_hid_core_initial_power_up(struct i2c_hid *ihid) -{ - /* - * If we're a panel follower, we'll register and do our initial power - * up when the panel turns on; otherwise we do it right away. - */ - if (drm_is_panel_follower(&ihid->client->dev)) - return i2c_hid_core_register_panel_follower(ihid); - else - return __do_i2c_hid_core_initial_power_up(ihid); -} - -static void i2c_hid_core_final_power_down(struct i2c_hid *ihid) -{ - /* - * If we're a follower, the act of unfollowing will cause us to be - * powered down. Otherwise we need to manually do it. 
- */ - if (ihid->is_panel_follower) - drm_panel_remove_follower(&ihid->panel_follower); - else - i2c_hid_core_suspend(ihid, true); -} - int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, u16 hid_descriptor_address, u32 quirks) { @@ -1211,6 +1202,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, ihid->ops = ops; ihid->client = client; ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address); + ihid->is_panel_follower = drm_is_panel_follower(&client->dev); init_waitqueue_head(&ihid->wait); mutex_init(&ihid->reset_lock); @@ -1224,14 +1216,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, return ret; device_enable_async_suspend(&client->dev); - ret = i2c_hid_init_irq(client); - if (ret < 0) - goto err_buffers_allocated; - hid = hid_allocate_device(); if (IS_ERR(hid)) { ret = PTR_ERR(hid); - goto err_irq; + goto err_free_buffers; } ihid->hid = hid; @@ -1242,19 +1230,42 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, hid->bus = BUS_I2C; hid->initial_quirks = quirks; - ret = i2c_hid_core_initial_power_up(ihid); + /* Power on and probe unless device is a panel follower. */ + if (!ihid->is_panel_follower) { + ret = i2c_hid_core_power_up(ihid); + if (ret < 0) + goto err_destroy_device; + + ret = __i2c_hid_core_probe(ihid); + if (ret < 0) + goto err_power_down; + } + + ret = i2c_hid_init_irq(client); + if (ret < 0) + goto err_power_down; + + /* + * If we're a panel follower, we'll register when the panel turns on; + * otherwise we do it right away. + */ + if (ihid->is_panel_follower) + ret = i2c_hid_core_register_panel_follower(ihid); + else + ret = i2c_hid_core_register_hid(ihid); if (ret) - goto err_mem_free; + goto err_free_irq; return 0; -err_mem_free: - hid_destroy_device(hid); - -err_irq: +err_free_irq: free_irq(client->irq, ihid); - -err_buffers_allocated: +err_power_down: + if (!ihid->is_panel_follower) + i2c_hid_core_power_down(ihid); +err_destroy_device: + hid_destroy_device(hid); +err_free_buffers: i2c_hid_free_buffers(ihid); return ret; @@ -1266,7 +1277,14 @@ void i2c_hid_core_remove(struct i2c_client *client) struct i2c_hid *ihid = i2c_get_clientdata(client); struct hid_device *hid; - i2c_hid_core_final_power_down(ihid); + /* + * If we're a follower, the act of unfollowing will cause us to be + * powered down. Otherwise we need to manually do it. + */ + if (ihid->is_panel_follower) + drm_panel_remove_follower(&ihid->panel_follower); + else + i2c_hid_core_suspend(ihid, true); hid = ihid->hid; hid_destroy_device(hid); diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 55cb25038e63..710fda5f19e1 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev) } wakeup = &adev->wakeup; + /* + * Call acpi_disable_gpe(), so that reference count + * gpe_event_info->runtime_count doesn't overflow. + * When gpe_event_info->runtime_count = 0, the call + * to acpi_disable_gpe() simply return. 
+ */ + acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); + acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); if (ACPI_FAILURE(acpi_sts)) { dev_err(dev, "enable ose_gpe failed\n"); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 495a8b5f6a2b..ae4bae63ad4f 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -694,6 +694,7 @@ static void npcm_i2c_callback(struct npcm_i2c *bus, { struct i2c_msg *msgs; int msgs_num; + bool do_complete = false; msgs = bus->msgs; msgs_num = bus->msgs_num; @@ -722,23 +723,17 @@ static void npcm_i2c_callback(struct npcm_i2c *bus, msgs[1].flags & I2C_M_RD) msgs[1].len = info; } - if (completion_done(&bus->cmd_complete) == false) - complete(&bus->cmd_complete); - break; - + do_complete = true; + break; case I2C_NACK_IND: /* MASTER transmit got a NACK before tx all bytes */ bus->cmd_err = -ENXIO; - if (bus->master_or_slave == I2C_MASTER) - complete(&bus->cmd_complete); - + do_complete = true; break; case I2C_BUS_ERR_IND: /* Bus error */ bus->cmd_err = -EAGAIN; - if (bus->master_or_slave == I2C_MASTER) - complete(&bus->cmd_complete); - + do_complete = true; break; case I2C_WAKE_UP_IND: /* I2C wake up */ @@ -752,6 +747,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus, if (bus->slave) bus->master_or_slave = I2C_SLAVE; #endif + if (do_complete) + complete(&bus->cmd_complete); } static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus) diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 313904be5f3b..57ff09f18c37 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -341,7 +341,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc, priv->adap.lock_ops = &i2c_parent_lock_ops; /* Sanity check on class */ - if (i2c_mux_parent_classes(parent) & class) + if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED) dev_err(&parent->dev, "Segment %d behind mux can't share classes with ancestors\n", chan_id); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index c343edf2f664..1e2cd7c8716e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4968,7 +4968,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, int err = 0; struct sockaddr *addr = (struct sockaddr *)&mc->addr; struct net_device *ndev = NULL; - struct ib_sa_multicast ib; + struct ib_sa_multicast ib = {}; enum ib_gid_type gid_type; bool send_only; diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 7b68b3ea979f..f2fb2d8a6597 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group, return -ENOMEM; for (i = 0; i < ports_num; i++) { - char port_str[10]; + char port_str[11]; ports[i].port_num = i + 1; snprintf(port_str, sizeof(port_str), "%u", i + 1); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index d5d3e4f0de77..6d1dbc978759 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { }, [RDMA_NLDEV_CMD_SYS_SET] = { .doit = nldev_set_sys_set_doit, + .flags = RDMA_NL_ADMIN_PERM, }, [RDMA_NLDEV_CMD_STAT_SET] = { .doit = nldev_stat_set_doit, diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index bf800f8cb3e4..495d5a5d0373 100644 --- 
a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -546,7 +546,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, if (hdr->in_words * 4 != count) return -EINVAL; - if (count < method_elm->req_size + sizeof(hdr)) { + if (count < method_elm->req_size + sizeof(*hdr)) { /* * rdma-core v18 and v19 have a bug where they send DESTROY_CQ * with a 16 byte write instead of 24. Old kernels didn't diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 0848c2c2ffcf..faa88d12ee86 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -910,6 +910,10 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) list_del(&qp->list); mutex_unlock(&rdev->qp_lock); atomic_dec(&rdev->stats.res.qp_count); + if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC) + atomic_dec(&rdev->stats.res.rc_qp_count); + else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD) + atomic_dec(&rdev->stats.res.ud_qp_count); ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index c8c4017fe405..e47b4ca64d33 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -665,7 +665,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; crsqe = &rcfw->crsqe_tbl[cookie]; - crsqe->is_in_used = false; if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags), @@ -681,8 +680,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, atomic_dec(&rcfw->timeout_send); if (crsqe->is_waiter_alive) { - if (crsqe->resp) + if (crsqe->resp) { memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + /* Insert write memory barrier to ensure that + * response data is copied before clearing the + * flags + */ + smp_wmb(); + } if (!blocked) wait_cmds++; } @@ -694,6 +699,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, if (!is_waiter_alive) crsqe->resp = NULL; + crsqe->is_in_used = false; + hwq->cons += req_size; /* This is a case to handle below scenario - diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ced615b5ea09..040ba2224f9f 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index dcccb6015232..c317947563fb 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -133,8 +133,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp) static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) { struct erdma_pd *pd = to_epd(mr->ibmr.pd); + u32 mtt_level = ERDMA_MR_MTT_0LEVEL; struct erdma_cmdq_reg_mr_req req; - u32 mtt_level; erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR); @@ -147,10 +147,9 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist); 
mtt_level = mr->mem.mtt->level; } - } else { + } else if (mr->type != ERDMA_MR_TYPE_DMA) { memcpy(req.phy_addr, mr->mem.mtt->buf, MTT_SIZE(mr->mem.page_cnt)); - mtt_level = ERDMA_MR_MTT_0LEVEL; } req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) | @@ -655,7 +654,7 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev, mtt = kzalloc(sizeof(*mtt), GFP_KERNEL); if (!mtt) - return NULL; + return ERR_PTR(-ENOMEM); mtt->size = ALIGN(size, PAGE_SIZE); mtt->buf = vzalloc(mtt->size); diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 24ee79aa2122..88f534cf690e 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[11]; + char buff[12]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 1e419e080b53..520034acf73a 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -2470,8 +2470,8 @@ destroy_res: mlx5_steering_anchor_destroy_res(ft_prio); put_flow_table: put_flow_table(dev, ft_prio, true); - mutex_unlock(&dev->flow_db->lock); free_obj: + mutex_unlock(&dev->flow_db->lock); kfree(obj); return err; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aed5cdea50e6..555629b798b9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2084,7 +2084,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) case MLX5_IB_MMAP_DEVICE_MEM: return "Device Memory"; default: - return NULL; + return "Unknown"; } } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3e345ef380f1..8a3762d9ff58 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs) static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) { - set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); + set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, + ent->dev->umrc.pd); MLX5_SET(mkc, mkc, free, 1); MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); @@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) if (!dev->cache.wq) return; - cancel_delayed_work_sync(&dev->cache.remove_ent_dwork); mutex_lock(&dev->cache.rb_lock); for (node = rb_first(root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); xa_lock_irq(&ent->mkeys); ent->disabled = true; xa_unlock_irq(&ent->mkeys); - cancel_delayed_work_sync(&ent->dwork); } + mutex_unlock(&dev->cache.rb_lock); + + /* + * After all entries are disabled and will not reschedule on WQ, + * flush it and all async commands. + */ + flush_workqueue(dev->cache.wq); mlx5_mkey_cache_debugfs_cleanup(dev); mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); + /* At this point all entries are disabled and have no concurrent work. 
*/ + mutex_lock(&dev->cache.rb_lock); node = rb_first(root); while (node) { ent = rb_entry(node, struct mlx5_cache_ent, node); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index a2605178f4ed..43e776073f49 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_put(cep); new_cep->listen_cep = NULL; if (rv) { + siw_cancel_mpatimer(new_cep); siw_cep_set_free(new_cep); goto error; } @@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w) /* * Socket close before MPA request received. */ - siw_dbg_cep(cep, "no mpareq: drop listener\n"); - siw_cep_put(cep->listen_cep); - cep->listen_cep = NULL; + if (cep->listen_cep) { + siw_dbg_cep(cep, + "no mpareq: drop listener\n"); + siw_cep_put(cep->listen_cep); + cep->listen_cep = NULL; + } } } release_cep = 1; @@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk) if (!cep) goto out; - siw_dbg_cep(cep, "state: %d\n", cep->state); + siw_dbg_cep(cep, "cep state: %d, socket state %d\n", + cep->state, sk->sk_state); + + if (sk->sk_state != TCP_ESTABLISHED) + goto out; switch (cep->state) { case SIW_EPSTATE_RDMA_MODE: diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1574218764e0..2916e77f589b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd) u32 tag; u16 ch_idx; struct srp_rdma_ch *ch; - int ret; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); @@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "Sending SRP abort for tag %#x\n", tag); if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, - SRP_TSK_ABORT_TASK, NULL) == 0) - ret = SUCCESS; - else if (target->rport->state == SRP_RPORT_LOST) - ret = FAST_IO_FAIL; - else - ret = FAILED; - if (ret == SUCCESS) { + SRP_TSK_ABORT_TASK, NULL) == 0) { srp_free_req(ch, req, scmnd, 0); - scmnd->result = DID_ABORT << 16; - scsi_done(scmnd); + return SUCCESS; } + if (target->rport->state == SRP_RPORT_LOST) + return FAST_IO_FAIL; - return ret; + return FAILED; } static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 2082081402d3..0b8927508427 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -671,8 +671,7 @@ static int apple_dart_attach_dev(struct iommu_domain *domain, return ret; switch (domain->type) { - case IOMMU_DOMAIN_DMA: - case IOMMU_DOMAIN_UNMANAGED: + default: ret = apple_dart_domain_add_streams(dart_domain, cfg); if (ret) return ret; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 4d83edc2be99..8a16cd3ef487 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) } } +/* + * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this + * is used as a threshold to replace per-page TLBI commands to issue in the + * command queue with an address-space TLBI command, when SMMU w/o a range + * invalidation feature handles too many per-page TLBI commands, which will + * otherwise result in a soft lockup. 
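Quick arithmetic for the CMDQ_MAX_TLBI_OPS threshold introduced above (illustration only, assuming 4 KiB pages):

    /* PAGE_SHIFT = 12  ->  CMDQ_MAX_TLBI_OPS = 1 << (12 - 3) = 512      */
    /* 512 per-page commands * 4 KiB = 2 MiB                             */
    /* so on an SMMU without range invalidation, any range of 2 MiB or   */
    /* more is collapsed into a single ASID-wide TLBI (size forced to 0) */
    /* instead of hundreds of per-page commands.                         */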
+ */ +#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3)) + static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, @@ -201,8 +210,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, * range. So do a simple translation here by calculating size correctly. */ size = end - start; - if (size == ULONG_MAX) - size = 0; + if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) { + if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE) + size = 0; + } else { + if (size == ULONG_MAX) + size = 0; + } if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) { if (!size) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index e82bf1c449a3..bd0a596f9863 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1895,18 +1895,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd, /* Get the leaf page size */ tg = __ffs(smmu_domain->domain.pgsize_bitmap); + num_pages = size >> tg; + /* Convert page size of 12,14,16 (log2) to 1,2,3 */ cmd->tlbi.tg = (tg - 10) / 2; /* - * Determine what level the granule is at. For non-leaf, io-pgtable - * assumes .tlb_flush_walk can invalidate multiple levels at once, - * so ignore the nominal last-level granule and leave TTL=0. + * Determine what level the granule is at. For non-leaf, both + * io-pgtable and SVA pass a nominal last-level granule because + * they don't know what level(s) actually apply, so ignore that + * and leave TTL=0. However for various errata reasons we still + * want to use a range command, so avoid the SVA corner case + * where both scale and num could be 0 as well. */ if (cmd->tlbi.leaf) cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); - - num_pages = size >> tg; + else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1) + num_pages++; } cmds.num = 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 5db283c17e0d..3685ba90ec88 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2998,13 +2998,6 @@ static int iommu_suspend(void) struct intel_iommu *iommu = NULL; unsigned long flag; - for_each_active_iommu(iommu, drhd) { - iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), - GFP_KERNEL); - if (!iommu->iommu_state) - goto nomem; - } - iommu_flush_all(); for_each_active_iommu(iommu, drhd) { @@ -3024,12 +3017,6 @@ static int iommu_suspend(void) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } return 0; - -nomem: - for_each_active_iommu(iommu, drhd) - kfree(iommu->iommu_state); - - return -ENOMEM; } static void iommu_resume(void) @@ -3061,9 +3048,6 @@ static void iommu_resume(void) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } - - for_each_active_iommu(iommu, drhd) - kfree(iommu->iommu_state); } static struct syscore_ops iommu_syscore_ops = { diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index c18fb699c87a..7dac94f62b4e 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -681,7 +681,7 @@ struct intel_iommu { struct iopf_queue *iopf_queue; unsigned char iopfq_name[16]; struct q_inval *qi; /* Queued invalidation info */ - u32 *iommu_state; /* Store iommu states between suspend and resume.*/ + u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/ #ifdef CONFIG_IRQ_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ diff --git a/drivers/iommu/mtk_iommu.c 
b/drivers/iommu/mtk_iommu.c index 640275873a27..fab6c347ce57 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -262,7 +262,7 @@ struct mtk_iommu_data { struct device *smicomm_dev; struct mtk_iommu_bank_data *bank; - struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */ + struct mtk_iommu_domain *share_dom; struct regmap *pericfg; struct mutex mutex; /* Protect m4u_group/m4u_dom above */ @@ -643,8 +643,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, struct mtk_iommu_domain *share_dom = data->share_dom; const struct mtk_iommu_iova_region *region; - /* Always use share domain in sharing pgtable case */ - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) { + /* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */ + if (share_dom) { dom->iop = share_dom->iop; dom->cfg = share_dom->cfg; dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap; @@ -677,8 +677,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, /* Update our support page sizes bitmap */ dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) - data->share_dom = dom; + data->share_dom = dom; update_iova_region: /* Update the iova region for this domain */ diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 3db4592cda1c..f407cce9ecaa 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -29,4 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, void gic_enable_of_quirks(const struct device_node *np, const struct gic_quirk *quirks, void *data); +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) +#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) +#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2) + #endif /* _IRQ_GIC_COMMON_H */ diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e0c2b10d154d..75a2dd550625 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -44,10 +44,6 @@ #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) -#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) -#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1) -#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2) - #define RD_LOCAL_LPI_ENABLED BIT(0) #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) #define RD_LOCAL_MEMRESERVE_DONE BIT(2) @@ -4754,6 +4750,14 @@ static bool __maybe_unused its_enable_rk3588001(void *data) return true; } +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -4809,6 +4813,11 @@ static const struct gic_quirk its_quirks[] = { }, #endif { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, + { } }; @@ -4817,6 +4826,10 @@ static void its_enable_quirks(struct its_node *its) u32 iidr = readl_relaxed(its->base + GITS_IIDR); gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); } static int its_save_disable(void) @@ -4952,7 +4965,7 @@ out_unmap: return NULL; } -static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) +static int its_init_domain(struct its_node *its) { struct irq_domain 
*inner_domain; struct msi_domain_info *info; @@ -4966,7 +4979,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) inner_domain = irq_domain_create_hierarchy(its_parent, its->msi_domain_flags, 0, - handle, &its_domain_ops, + its->fwnode_handle, &its_domain_ops, info); if (!inner_domain) { kfree(info); @@ -5017,8 +5030,7 @@ static int its_init_vpe_domain(void) return 0; } -static int __init its_compute_its_list_map(struct resource *res, - void __iomem *its_base) +static int __init its_compute_its_list_map(struct its_node *its) { int its_number; u32 ctlr; @@ -5032,15 +5044,15 @@ static int __init its_compute_its_list_map(struct resource *res, its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); if (its_number >= GICv4_ITS_LIST_MAX) { pr_err("ITS@%pa: No ITSList entry available!\n", - &res->start); + &its->phys_base); return -EINVAL; } - ctlr = readl_relaxed(its_base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr &= ~GITS_CTLR_ITS_NUMBER; ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; - writel_relaxed(ctlr, its_base + GITS_CTLR); - ctlr = readl_relaxed(its_base + GITS_CTLR); + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { its_number = ctlr & GITS_CTLR_ITS_NUMBER; its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; @@ -5048,75 +5060,50 @@ static int __init its_compute_its_list_map(struct resource *res, if (test_and_set_bit(its_number, &its_list_map)) { pr_err("ITS@%pa: Duplicate ITSList entry %d\n", - &res->start, its_number); + &its->phys_base, its_number); return -EINVAL; } return its_number; } -static int __init its_probe_one(struct resource *res, - struct fwnode_handle *handle, int numa_node) +static int __init its_probe_one(struct its_node *its) { - struct its_node *its; - void __iomem *its_base; - u64 baser, tmp, typer; + u64 baser, tmp; struct page *page; u32 ctlr; int err; - its_base = its_map_one(res, &err); - if (!its_base) - return err; - - pr_info("ITS %pR\n", res); - - its = kzalloc(sizeof(*its), GFP_KERNEL); - if (!its) { - err = -ENOMEM; - goto out_unmap; - } - - raw_spin_lock_init(&its->lock); - mutex_init(&its->dev_alloc_lock); - INIT_LIST_HEAD(&its->entry); - INIT_LIST_HEAD(&its->its_device_list); - typer = gic_read_typer(its_base + GITS_TYPER); - its->typer = typer; - its->base = its_base; - its->phys_base = res->start; if (is_v4(its)) { - if (!(typer & GITS_TYPER_VMOVP)) { - err = its_compute_its_list_map(res, its_base); + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); if (err < 0) - goto out_free_its; + goto out; its->list_nr = err; pr_info("ITS@%pa: Using ITS number %d\n", - &res->start, err); + &its->phys_base, err); } else { - pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); } if (is_v4_1(its)) { - u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); - its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K); + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); if (!its->sgir_base) { err = -ENOMEM; - goto out_free_its; + goto out; } - its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", - &res->start, its->mpidr, svpet); + &its->phys_base, its->mpidr, svpet); } } - its->numa_node = numa_node; - page = 
alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, get_order(ITS_CMD_QUEUE_SZ)); if (!page) { @@ -5125,12 +5112,9 @@ static int __init its_probe_one(struct resource *res, } its->cmd_base = (void *)page_address(page); its->cmd_write = its->cmd_base; - its->fwnode_handle = handle; its->get_msi_base = its_irq_get_msi_base; its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; - its_enable_quirks(its); - err = its_alloc_tables(its); if (err) goto out_free_cmd; @@ -5174,7 +5158,7 @@ static int __init its_probe_one(struct resource *res, ctlr |= GITS_CTLR_ImDe; writel_relaxed(ctlr, its->base + GITS_CTLR); - err = its_init_domain(handle, its); + err = its_init_domain(its); if (err) goto out_free_tables; @@ -5191,11 +5175,8 @@ out_free_cmd: out_unmap_sgir: if (its->sgir_base) iounmap(its->sgir_base); -out_free_its: - kfree(its); -out_unmap: - iounmap(its_base); - pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); return err; } @@ -5356,10 +5337,53 @@ static const struct of_device_id its_device_id[] = { {}, }; +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + static int __init its_of_probe(struct device_node *node) { struct device_node *np; struct resource res; + int err; /* * Make sure *all* the ITS are reset before we probe any, as @@ -5369,8 +5393,6 @@ static int __init its_of_probe(struct device_node *node) */ for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { - int err; - if (!of_device_is_available(np) || !of_property_read_bool(np, "msi-controller") || of_address_to_resource(np, 0, &res)) @@ -5383,6 +5405,8 @@ static int __init its_of_probe(struct device_node *node) for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + if (!of_device_is_available(np)) continue; if (!of_property_read_bool(np, "msi-controller")) { @@ -5396,7 +5420,17 @@ static int __init its_of_probe(struct device_node *node) continue; } - its_probe_one(&res, &np->fwnode, of_node_to_nid(np)); + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + its_enable_quirks(its); + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } } return 0; } @@ -5508,6 +5542,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, { struct acpi_madt_generic_translator *its_entry; struct fwnode_handle *dom_handle; + struct its_node *its; struct resource res; int err; @@ -5532,11 +5567,18 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, goto dom_err; } - err = its_probe_one(&res, dom_handle, - 
acpi_get_its_numa_node(its_entry->translation_id)); + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + err = its_probe_one(its); if (!err) return 0; +node_err: iort_deregister_domain_token(its_entry->translation_id); dom_err: irq_domain_free_fwnode(dom_handle); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index eedfa8e9f077..f59ac9586b7b 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1857,6 +1857,14 @@ static bool gic_enable_quirk_arm64_2941627(void *data) return true; } +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + static const struct gic_quirk gic_quirks[] = { { .desc = "GICv3: Qualcomm MSM8996 broken firmware", @@ -1924,6 +1932,11 @@ static const struct gic_quirk gic_quirks[] = { .init = gic_enable_quirk_arm64_2941627, }, { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { } }; diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index 4bbfa2b0a4df..96f4e322ed6b 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d) raw_spin_lock(&priv->lock); reg = readl_relaxed(priv->base + TSSR(tssr_index)); - reg &= ~(TSSEL_MASK << tssr_offset); + reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset)); writel_relaxed(reg, priv->base + TSSR(tssr_index)); raw_spin_unlock(&priv->lock); } @@ -130,8 +130,8 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d) unsigned int hw_irq = irqd_to_hwirq(d); if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { + unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d); struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); - unsigned long tint = (uintptr_t)d->chip_data; u32 offset = hw_irq - IRQC_TINT_START; u32 tssr_offset = TSSR_OFFSET(offset); u8 tssr_index = TSSR_INDEX(offset); diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 4adeee1bc391..e8d01b14ccdd 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -155,8 +155,16 @@ static int __init riscv_intc_init(struct device_node *node, * for each INTC DT node. We only need to do INTC initialization * for the INTC DT node belonging to boot CPU (or boot HART). */ - if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) + if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) { + /* + * The INTC nodes of each CPU are suppliers for downstream + * interrupt controllers (such as PLIC, IMSIC and APLIC + * direct-mode) so we should mark an INTC node as initialized + * if we are not creating IRQ domain for it. 
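Splitting its_node_init() out of its_probe_one() in the ITS rework above lets the caller run its_enable_quirks(), including the new OF-property based "dma-noncoherent" quirk, on a fully initialised its_node before any tables or the command queue are allocated. The DT path in the diff boils down to this order (sketch of the code already shown above):

    its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); /* map regs, read TYPER */
    if (!its)
            return -ENOMEM;
    its_enable_quirks(its);    /* IIDR- and OF-based quirks set its->flags */
    err = its_probe_one(its);  /* allocations now honour those flags */
    if (err)
            its_node_destroy(its);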
+ */ + fwnode_dev_initialized(of_fwnode_handle(node), true); return 0; + } return riscv_intc_init_common(of_node_to_fwnode(node)); } diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index d8ba5fba7450..971240e2e31b 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -460,6 +460,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = { .map = irq_map_generic_chip, .alloc = stm32_exti_alloc, .free = stm32_exti_free, + .xlate = irq_domain_xlate_twocell, }; static void stm32_irq_ack(struct irq_data *d) diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 8c581c985aa7..7f314e58f3ce 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -12,6 +12,7 @@ #include <linux/irqdomain.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/xtensa-mx.h> #include <linux/of.h> #include <asm/mxregs.h> diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index a32c0d28d038..74b2f124116e 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -22,9 +22,20 @@ #define PDC_MAX_GPIO_IRQS 256 +/* Valid only on HW version < 3.2 */ #define IRQ_ENABLE_BANK 0x10 #define IRQ_i_CFG 0x110 +/* Valid only on HW version >= 3.2 */ +#define IRQ_i_CFG_IRQ_ENABLE 3 + +#define IRQ_i_CFG_TYPE_MASK GENMASK(2, 0) + +#define PDC_VERSION_REG 0x1000 + +/* Notable PDC versions */ +#define PDC_VERSION_3_2 0x30200 + struct pdc_pin_region { u32 pin_base; u32 parent_base; @@ -37,6 +48,7 @@ static DEFINE_RAW_SPINLOCK(pdc_lock); static void __iomem *pdc_base; static struct pdc_pin_region *pdc_region; static int pdc_region_cnt; +static unsigned int pdc_version; static void pdc_reg_write(int reg, u32 i, u32 val) { @@ -48,20 +60,32 @@ static u32 pdc_reg_read(int reg, u32 i) return readl_relaxed(pdc_base + reg + i * sizeof(u32)); } -static void pdc_enable_intr(struct irq_data *d, bool on) +static void __pdc_enable_intr(int pin_out, bool on) { - int pin_out = d->hwirq; unsigned long enable; - unsigned long flags; - u32 index, mask; - index = pin_out / 32; - mask = pin_out % 32; + if (pdc_version < PDC_VERSION_3_2) { + u32 index, mask; + + index = pin_out / 32; + mask = pin_out % 32; + + enable = pdc_reg_read(IRQ_ENABLE_BANK, index); + __assign_bit(mask, &enable, on); + pdc_reg_write(IRQ_ENABLE_BANK, index, enable); + } else { + enable = pdc_reg_read(IRQ_i_CFG, pin_out); + __assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on); + pdc_reg_write(IRQ_i_CFG, pin_out, enable); + } +} + +static void pdc_enable_intr(struct irq_data *d, bool on) +{ + unsigned long flags; raw_spin_lock_irqsave(&pdc_lock, flags); - enable = pdc_reg_read(IRQ_ENABLE_BANK, index); - __assign_bit(mask, &enable, on); - pdc_reg_write(IRQ_ENABLE_BANK, index, enable); + __pdc_enable_intr(d->hwirq, on); raw_spin_unlock_irqrestore(&pdc_lock, flags); } @@ -142,6 +166,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type) } old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq); + pdc_type |= (old_pdc_type & ~IRQ_i_CFG_TYPE_MASK); pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type); ret = irq_chip_set_type_parent(d, type); @@ -246,7 +271,6 @@ static const struct irq_domain_ops qcom_pdc_ops = { static int pdc_setup_pin_mapping(struct device_node *np) { int ret, n, i; - u32 irq_index, reg_index, val; n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32)); if (n <= 0 || n % 3) @@ -276,29 +300,38 @@ static int pdc_setup_pin_mapping(struct device_node *np) if (ret) return ret; - 
for (i = 0; i < pdc_region[n].cnt; i++) { - reg_index = (i + pdc_region[n].pin_base) >> 5; - irq_index = (i + pdc_region[n].pin_base) & 0x1f; - val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index); - val &= ~BIT(irq_index); - pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val); - } + for (i = 0; i < pdc_region[n].cnt; i++) + __pdc_enable_intr(i + pdc_region[n].pin_base, 0); } return 0; } +#define QCOM_PDC_SIZE 0x30000 + static int qcom_pdc_init(struct device_node *node, struct device_node *parent) { struct irq_domain *parent_domain, *pdc_domain; + resource_size_t res_size; + struct resource res; int ret; - pdc_base = of_iomap(node, 0); + /* compat with old sm8150 DT which had very small region for PDC */ + if (of_address_to_resource(node, 0, &res)) + return -EINVAL; + + res_size = max_t(resource_size_t, resource_size(&res), QCOM_PDC_SIZE); + if (res_size > resource_size(&res)) + pr_warn("%pOF: invalid reg size, please fix DT\n", node); + + pdc_base = ioremap(res.start, res_size); if (!pdc_base) { pr_err("%pOF: unable to map PDC registers\n", node); return -ENXIO; } + pdc_version = pdc_reg_read(PDC_VERSION_REG, 0); + parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("%pOF: unable to find PDC's parent domain\n", node); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 04f9ea675f2c..214ed81eb0e9 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -479,10 +479,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data, led_parse_fwnode_props(dev, fwnode, &props); - /* We want to label LEDs that can produce full range of colors - * as RGB, not multicolor */ - BUG_ON(props.color == LED_COLOR_ID_MULTI); - if (props.label) { /* * If init_data.devicename is NULL, then it indicates that diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f2662c21a6df..5315fd261c23 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, int err; u8 *buf; - reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64)); + reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm); + reqsize = ALIGN(reqsize, __alignof__(__le64)); req = kmalloc(reqsize + cc->iv_size, GFP_NOIO); if (!req) diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index ad8e670a2f9b..b487f7acc860 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -748,17 +748,16 @@ err: /* * Cleanup zoned device information. 
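The dm-crypt EBOIV fix above adds sizeof(*req) because the allocation has to hold the skcipher request itself, the transform's request context, and the IV scratch space. Illustrative layout with hypothetical sizes, assuming the IV buffer is placed at (u8 *)req + reqsize as elsewhere in that function:

    /* sizeof(*req) = 88, crypto_skcipher_reqsize(tfm) = 72 (hypothetical) */
    /* reqsize = ALIGN(88 + 72, 8) = 160                                   */
    /* kmalloc(160 + cc->iv_size):                                         */
    /*   [ skcipher_request | request ctx | IV (cc->iv_size bytes) ]       */
    /*   ^ req                             ^ (u8 *)req + reqsize           */
    /* the old calculation omitted the leading 88 bytes, leaving the       */
    /* buffer sizeof(*req) too small.                                      */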
*/ -static void dmz_put_zoned_device(struct dm_target *ti) +static void dmz_put_zoned_devices(struct dm_target *ti) { struct dmz_target *dmz = ti->private; int i; - for (i = 0; i < dmz->nr_ddevs; i++) { - if (dmz->ddev[i]) { + for (i = 0; i < dmz->nr_ddevs; i++) + if (dmz->ddev[i]) dm_put_device(ti, dmz->ddev[i]); - dmz->ddev[i] = NULL; - } - } + + kfree(dmz->ddev); } static int dmz_fixup_devices(struct dm_target *ti) @@ -948,7 +947,7 @@ err_bio: err_meta: dmz_dtr_metadata(dmz->metadata); err_dev: - dmz_put_zoned_device(ti); + dmz_put_zoned_devices(ti); err: kfree(dmz->dev); kfree(dmz); @@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti) bioset_exit(&dmz->bio_set); - dmz_put_zoned_device(ti); + dmz_put_zoned_devices(ti); mutex_destroy(&dmz->chunk_lock); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4cb9c608ee19..284cd71bcc68 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf, set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state); r5l_wake_reclaim(conf->log, 0); + + /* release batch_last before wait to avoid risk of deadlock */ + if (ctx && ctx->batch_last) { + raid5_release_stripe(ctx->batch_last); + ctx->batch_last = NULL; + } + wait_event_lock_irq(conf->wait_for_stripe, is_inactive_blocked(conf, hash), *(conf->hash_locks + hash)); diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig index e113902fa806..ee4684159d3d 100644 --- a/drivers/media/pci/intel/Kconfig +++ b/drivers/media/pci/intel/Kconfig @@ -1,11 +1,19 @@ # SPDX-License-Identifier: GPL-2.0-only + +source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/ivsc/Kconfig" + config IPU_BRIDGE - tristate + tristate "Intel IPU Bridge" depends on I2C && ACPI help - This is a helper module for the IPU bridge, which can be - used by ipu3 and other drivers. In order to handle module - dependencies, this is selected by each driver that needs it. + The IPU bridge is a helper library for Intel IPU drivers to + function on systems shipped with Windows. -source "drivers/media/pci/intel/ipu3/Kconfig" -source "drivers/media/pci/intel/ivsc/Kconfig" + Currently used by the ipu3-cio2 and atomisp drivers. + + Supported systems include: + + - Microsoft Surface models (except Surface Pro 3) + - The Lenovo Miix line (for example the 510, 520, 710 and 720) + - Dell 7285 diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig index 0951545eab21..c0a250daa927 100644 --- a/drivers/media/pci/intel/ipu3/Kconfig +++ b/drivers/media/pci/intel/ipu3/Kconfig @@ -2,13 +2,13 @@ config VIDEO_IPU3_CIO2 tristate "Intel ipu3-cio2 driver" depends on VIDEO_DEV && PCI + depends on IPU_BRIDGE || !IPU_BRIDGE depends on ACPI || COMPILE_TEST depends on X86 select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE select VIDEOBUF2_DMA_SG - select IPU_BRIDGE if CIO2_BRIDGE help This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel @@ -18,22 +18,3 @@ config VIDEO_IPU3_CIO2 Say Y or M here if you have a Skylake/Kaby Lake SoC with MIPI CSI-2 connected camera. The module will be called ipu3-cio2. - -config CIO2_BRIDGE - bool "IPU3 CIO2 Sensors Bridge" - depends on VIDEO_IPU3_CIO2 && ACPI - depends on I2C - help - This extension provides an API for the ipu3-cio2 driver to create - connections to cameras that are hidden in the SSDB buffer in ACPI. - It can be used to enable support for cameras in detachable / hybrid - devices that ship with Windows. 
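The "depends on IPU_BRIDGE || !IPU_BRIDGE" line added to the ipu3 Kconfig above is the usual tristate idiom for an optional helper; it evaluates as follows:

    IPU_BRIDGE = n  ->  dependency is y, VIDEO_IPU3_CIO2 may be y or m (bridge compiled out)
    IPU_BRIDGE = y  ->  dependency is y, VIDEO_IPU3_CIO2 may be y or m
    IPU_BRIDGE = m  ->  dependency is m, VIDEO_IPU3_CIO2 is limited to m

so a built-in ipu3-cio2 can never end up calling into a modular bridge.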
- - Say Y here if your device is a detachable / hybrid laptop that comes - with Windows installed by the OEM, for example: - - - Microsoft Surface models (except Surface Pro 3) - - The Lenovo Miix line (for example the 510, 520, 710 and 720) - - Dell 7285 - - If in doubt, say N here. diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig index 212753450576..a8cb981544f7 100644 --- a/drivers/media/pci/intel/ivsc/Kconfig +++ b/drivers/media/pci/intel/ivsc/Kconfig @@ -6,7 +6,7 @@ config INTEL_VSC depends on INTEL_MEI && ACPI && VIDEO_DEV select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API - select V4L2_ASYNC + select V4L2_FWNODE help This adds support for Intel Visual Sensing Controller (IVSC). diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c index 6e6caf50e11e..59b89e421dc2 100644 --- a/drivers/media/platform/intel/pxa_camera.c +++ b/drivers/media/platform/intel/pxa_camera.c @@ -2398,7 +2398,7 @@ static int pxa_camera_probe(struct platform_device *pdev) PXA_CAM_DRV_NAME, pcdev); if (err) { dev_err(&pdev->dev, "Camera interrupt register failed\n"); - goto exit_v4l2_device_unregister; + goto exit_deactivate; } pcdev->notifier.ops = &pxa_camera_sensor_ops; diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c index d299cc2962a5..ae6290d28f8e 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c @@ -138,7 +138,8 @@ int vpu_enc_init(struct venc_vpu_inst *vpu) vpu->ctx->vpu_inst = vpu; status = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id, - vpu_enc_ipi_handler, "venc", NULL); + vpu_enc_ipi_handler, "venc", + vpu->ctx->dev); if (status) { mtk_venc_err(vpu->ctx, "vpu_ipi_register fail %d", status); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index aea95745c73f..90ce58fd629e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -241,6 +241,7 @@ config MFD_CS42L43 tristate select MFD_CORE select REGMAP + select REGMAP_IRQ config MFD_CS42L43_I2C tristate "Cirrus Logic CS42L43 (I2C)" diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 3dae5e3a1697..cd512284bfb3 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -83,63 +83,20 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr) static void rts5227_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; - u32 lval; struct rtsx_cr_option *option = &pcr->option; - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - if (CHK_PCI_PID(pcr, 0x522A)) { - if (0 == (lval & 0x0F)) - rtsx_pci_enable_oobs_polling(pcr); - else + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) rtsx_pci_disable_oobs_polling(pcr); + else + rtsx_pci_enable_oobs_polling(pcr); } - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - else - 
rtsx_clear_dev_flag(pcr, PM_L1_2_EN); - if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; - } static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) @@ -195,7 +152,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) } } - if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) + if (option->force_clkreq_0) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index f4ab09439da7..0c7f10bcf6f1 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -386,59 +386,25 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr) static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; - u32 lval; struct rtsx_cr_option *option = &pcr->option; - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - - if (0 == (lval & 0x0F)) - rtsx_pci_enable_oobs_polling(pcr); - else + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) rtsx_pci_disable_oobs_polling(pcr); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); else - rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + rtsx_pci_enable_oobs_polling(pcr); rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); - if (option->ltr_en) { - u16 val; - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_en) { + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) { + struct rtsx_cr_option *option = &pcr->option; rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); @@ -469,6 +435,17 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. 
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); if (pcr->rtd3_en) { diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 47ab72a43256..6c81040e18be 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -86,64 +86,22 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; struct rtsx_cr_option *option = &(pcr->option); - u32 lval; - - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { - if (0 == (lval & 0x0F)) - rtsx_pci_enable_oobs_polling(pcr); - else + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) rtsx_pci_disable_oobs_polling(pcr); + else + rtsx_pci_enable_oobs_polling(pcr); } - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } -static int rts5249_init_from_hw(struct rtsx_pcr *pcr) -{ - struct rtsx_cr_option *option = &(pcr->option); - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; - - return 0; -} - static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { /* Set relink_time to 0 */ @@ -276,7 +234,6 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) struct rtsx_cr_option *option = &(pcr->option); rts5249_init_from_cfg(pcr); - rts5249_init_from_hw(pcr); rtsx_pci_init_cmd(pcr); @@ -327,11 +284,12 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) } } + /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. 
*/ - if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) + if (option->force_clkreq_0) rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 79b18f6f73a8..d2d3a6ccb8f7 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -480,47 +480,19 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr) static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; struct rtsx_cr_option *option = &pcr->option; - u32 lval; - - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); rts5260_pwr_saving_setting(pcr); if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) { + struct rtsx_cr_option *option = &pcr->option; /* Set mcu_cnt to 7 to ensure data can be sampled properly */ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07); @@ -539,6 +511,17 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) rts5260_init_hw(pcr); + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. 
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); return 0; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 94af6bf8a25a..67252512a132 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -454,54 +454,17 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr) static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) { - struct pci_dev *pdev = pcr->pci; - int l1ss; - u32 lval; struct rtsx_cr_option *option = &pcr->option; - l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); - if (!l1ss) - return; - - pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_1) - rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_ASPM_L1_2) - rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) - rtsx_set_dev_flag(pcr, PM_L1_1_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - - if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) - rtsx_set_dev_flag(pcr, PM_L1_2_EN); - else - rtsx_clear_dev_flag(pcr, PM_L1_2_EN); - - rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); if (option->ltr_en) { - u16 val; - - pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); - if (val & PCI_EXP_DEVCTL2_LTR_EN) { - option->ltr_enabled = true; - option->ltr_active = true; + if (option->ltr_enabled) rtsx_set_ltr_latency(pcr, option->ltr_active_latency); - } else { - option->ltr_enabled = false; - } } } static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) { + struct rtsx_cr_option *option = &pcr->option; u32 val; rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1, @@ -547,6 +510,17 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. + */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); if (pcr->rtd3_en) { diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index a3f4b52bb159..a30751ad3733 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1326,11 +1326,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } - if (pcr->aspm_mode == ASPM_MODE_REG) { + if (pcr->aspm_mode == ASPM_MODE_REG) rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - } /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
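The card-reader hunks above all follow from moving the L1SS/LTR probing into rtsx_pci_init_chip() (next hunk); the per-chip init_from_cfg() helpers now only consume pre-computed flags. Condensed sketch of the resulting logic, with devctl2 standing in for the cfg_val read in the patch:

    /* read PCI_L1SS_CTL1 once and set/clear ASPM_L1_1_EN, ASPM_L1_2_EN, */
    /* PM_L1_1_EN and PM_L1_2_EN from it                                 */
    option->ltr_enabled = !!(devctl2 & PCI_EXP_DEVCTL2_LTR_EN);
    option->force_clkreq_0 = !rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN |
                                                       PM_L1_1_EN | PM_L1_2_EN);
    /* no L1SS capability at all: ltr_enabled = false and                */
    /* force_clkreq_0 = true, i.e. CLKREQ# is forced low at init         */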
@@ -1345,7 +1342,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) { - int err; + struct rtsx_cr_option *option = &(pcr->option); + int err, l1ss; + u32 lval; u16 cfg_val; u8 val; @@ -1430,6 +1429,48 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) pcr->aspm_enabled = true; } + l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS); + if (l1ss) { + pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + + pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val); + if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + } else { + option->ltr_enabled = false; + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; + } else { + option->ltr_enabled = false; + option->force_clkreq_0 = true; + } + if (pcr->ops->fetch_vendor_settings) pcr->ops->fetch_vendor_settings(pcr); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 8b91a55ec0d2..8ee51e49fced 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -894,6 +894,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, return -EINVAL; } + /* UBI cannot work on flashes with zero erasesize. */ + if (!mtd->erasesize) { + pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n", + mtd->index); + return -EINVAL; + } + if (ubi_num == UBI_DEV_NUM_AUTO) { /* Search for an empty slot in the @ubi_devices array */ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 52a99d8bada0..ab434a77b059 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2958,14 +2958,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) * from the wrong location resulting in the switch booting * to wrong mode and inoperable. */ - mv88e6xxx_g1_wait_eeprom_done(chip); + if (chip->info->ops->get_eeprom) + mv88e6xxx_g2_eeprom_wait(chip); gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); usleep_range(10000, 20000); - mv88e6xxx_g1_wait_eeprom_done(chip); + if (chip->info->ops->get_eeprom) + mv88e6xxx_g2_eeprom_wait(chip); } } diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 2fa55a643591..174c773b38c2 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } -void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) -{ - const unsigned long timeout = jiffies + 1 * HZ; - u16 val; - int err; - - /* Wait up to 1 second for the switch to finish reading the - * EEPROM. 
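The new UBI guard above exists because the attach path derives the PEB count and offsets by dividing by the erase block size, so a zero erasesize would crash with a divide error. The line below only illustrates the kind of arithmetic being protected; it is not quoted from the attach code:

    peb_count = div_u64(mtd->size, mtd->erasesize);   /* divide error if erasesize == 0 */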
- */ - while (time_before(jiffies, timeout)) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); - if (err) { - dev_err(chip->dev, "Error reading status"); - return; - } - - /* If the switch is still resetting, it may not - * respond on the bus, and so MDIO read returns - * 0xffff. Differentiate between that, and waiting for - * the EEPROM to be done by bit 0 being set. - */ - if (val != 0xffff && - val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)) - return; - - usleep_range(1000, 2000); - } - - dev_err(chip->dev, "Timeout waiting for EEPROM done"); -} - /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index c99ddd117fe6..1095261f5b49 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); -void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 937a01f2ba75..b2b5f6ba438f 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) * Offset 0x15: EEPROM Addr (for 8-bit data access) */ -static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) { int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY); int err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 7e091965582b..d9434f7cae53 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, int port); +int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 832a2ae01950..a8d79ee350f8 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb, * the user space for finding a flow. During this process, OVS computes * checksum on the first packet when CHECKSUM_PARTIAL flag is set. * - * So, re-compute TCP pseudo header checksum when configured for - * trunk mode. + * So, re-compute TCP pseudo header checksum. 
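The value recomputed in the ibmveth hunk above is the TCP pseudo-header sum, which CHECKSUM_PARTIAL offload expects to find pre-seeded in tcph->check. Sketch of the IPv4 case, mirroring the code in the hunk:

    /* pseudo-header = source address + destination address + protocol + TCP length */
    tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                     skb->len - iphlen, IPPROTO_TCP, 0);
    /* checksumming resumes at the TCP header; the result is written back */
    /* at offsetof(struct tcphdr, check)                                   */
    skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check));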
*/ + if (iph_proto == IPPROTO_TCP) { struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); + if (tcph->check == 0x0000) { /* Recompute TCP pseudo header checksum */ - if (adapter->is_active_trunk) { - tcphdrlen = skb->len - iphlen; - if (skb_proto == ETH_P_IP) - tcph->check = - ~csum_tcpudp_magic(iph->saddr, - iph->daddr, tcphdrlen, iph_proto, 0); - else if (skb_proto == ETH_P_IPV6) - tcph->check = - ~csum_ipv6_magic(&iph6->saddr, - &iph6->daddr, tcphdrlen, iph_proto, 0); - } + tcphdrlen = skb->len - iphlen; + if (skb_proto == ETH_P_IP) + tcph->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcphdrlen, iph_proto, 0); + else if (skb_proto == ETH_P_IPV6) + tcph->check = + ~csum_ipv6_magic(&iph6->saddr, + &iph6->daddr, tcphdrlen, iph_proto, 0); /* Setup SKB fields for checksum offload */ skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check)); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index b03426ac932b..db97353efd06 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2617,12 +2617,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf) goto err; } - /* Read flexiflag registers to determine whether the - * corresponding RXDID is configured and supported or not. - * Since Legacy 16byte descriptor format is not supported, - * start from Legacy 32byte descriptor. + /* RXDIDs supported by DDP package can be read from the register + * to get the supported RXDID bitmap. But the legacy 32byte RXDID + * is not listed in DDP package, add it in the bitmap manually. + * Legacy 16byte descriptor is not supported. */ - for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { + rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1); + + for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ddec1627f1a7..8d0bacf4e49c 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2195,7 +2195,7 @@ struct rx_ring_info { struct sk_buff *skb; dma_addr_t data_addr; DEFINE_DMA_UNMAP_LEN(data_size); - dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; }; enum flow_control { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3cffd1bd3067..20afe79f380a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3171,8 +3171,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { - __napi_schedule(ð->rx_napi); mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + __napi_schedule(ð->rx_napi); } return IRQ_HANDLED; @@ -3184,8 +3184,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) eth->tx_events++; if (likely(napi_schedule_prep(ð->tx_napi))) { - __napi_schedule(ð->tx_napi); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + __napi_schedule(ð->tx_napi); } return IRQ_HANDLED; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 329e374b9539..43ba71e82260 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,6 +46,7 @@ config LAN743X tristate "LAN743x support" 
depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + select PHYLIB select FIXED_PHY select CRC16 select CRC32 diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 4a16ebff3d1d..48ea4aeeea5d 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -91,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb) return 0; } +static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash, + int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey) +{ + ash->dma_handle[sg_i] = da; + ash->size[sg_i] = sge_len; + + tp->wqe_req.sgl[sg_i].address = da; + tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; + tp->wqe_req.sgl[sg_i].size = sge_len; +} + static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, - struct mana_tx_package *tp) + struct mana_tx_package *tp, int gso_hs) { struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + int hsg = 1; /* num of SGEs of linear part */ struct gdma_dev *gd = apc->ac->gdma_dev; + int skb_hlen = skb_headlen(skb); + int sge0_len, sge1_len = 0; struct gdma_context *gc; struct device *dev; skb_frag_t *frag; dma_addr_t da; + int sg_i; int i; gc = gd->gdma_context; dev = gc->dev; - da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (gso_hs && gso_hs < skb_hlen) { + sge0_len = gso_hs; + sge1_len = skb_hlen - gso_hs; + } else { + sge0_len = skb_hlen; + } + + da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, da)) return -ENOMEM; - ash->dma_handle[0] = da; - ash->size[0] = skb_headlen(skb); + mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); - tp->wqe_req.sgl[0].address = ash->dma_handle[0]; - tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[0].size = ash->size[0]; + if (sge1_len) { + sg_i = 1; + da = dma_map_single(dev, skb->data + sge0_len, sge1_len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, da)) + goto frag_err; + + mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); + hsg = 2; + } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg_i = hsg + i; + frag = &skb_shinfo(skb)->frags[i]; da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); - if (dma_mapping_error(dev, da)) goto frag_err; - ash->dma_handle[i + 1] = da; - ash->size[i + 1] = skb_frag_size(frag); - - tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; - tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; + mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag), + gd->gpa_mkey); } return 0; frag_err: - for (i = i - 1; i >= 0; i--) - dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], + for (i = sg_i - 1; i >= hsg; i--) + dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + for (i = hsg - 1; i >= 0; i--) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); return -ENOMEM; } +/* Handle the case when GSO SKB linear length is too large. + * MANA NIC requires GSO packets to put only the packet header to SGE0. + * So, we need 2 SGEs for the skb linear part which contains more than the + * header. + * Return a positive value for the number of SGEs, or a negative value + * for an error. 
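A worked example of the header/SGE split described in the comment above, with hypothetical packet sizes:

    /* GSO TCP skb: skb_headlen() = 200 bytes, two page frags,               */
    /* gso_hs = 14 (Ethernet) + 20 (IPv4) + 32 (TCP incl. options) = 66      */
    /*   SGE0 = 66 bytes    headers only, as the hardware requires           */
    /*   SGE1 = 134 bytes   remainder of the linear area                     */
    /*   SGE2, SGE3         the two page frags                               */
    /* mana_fix_skb_head() returns 1 + nr_frags + 1 = 4, and on completion   */
    /* mana_unmap_skb() unmaps the first two SGEs with dma_unmap_single()    */
    /* because skb_headlen() > ash->size[0]  (hsg == 2).                     */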
+ */ +static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb, + int gso_hs) +{ + int num_sge = 1 + skb_shinfo(skb)->nr_frags; + int skb_hlen = skb_headlen(skb); + + if (gso_hs < skb_hlen) { + num_sge++; + } else if (gso_hs > skb_hlen) { + if (net_ratelimit()) + netdev_err(ndev, + "TX nonlinear head: hs:%d, skb_hlen:%d\n", + gso_hs, skb_hlen); + + return -EINVAL; + } + + return num_sge; +} + +/* Get the GSO packet's header size */ +static int mana_get_gso_hs(struct sk_buff *skb) +{ + int gso_hs; + + if (skb->encapsulation) { + gso_hs = skb_inner_tcp_all_headers(skb); + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + gso_hs = skb_transport_offset(skb) + + sizeof(struct udphdr); + } else { + gso_hs = skb_tcp_all_headers(skb); + } + } + + return gso_hs; +} + netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) { enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; struct mana_port_context *apc = netdev_priv(ndev); + int gso_hs = 0; /* zero for non-GSO pkts */ u16 txq_idx = skb_get_queue_mapping(skb); struct gdma_dev *gd = apc->ac->gdma_dev; bool ipv4 = false, ipv6 = false; @@ -159,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct mana_txq *txq; struct mana_cq *cq; int err, len; - u16 ihs; if (unlikely(!apc->port_is_up)) goto tx_drop; @@ -209,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) pkg.wqe_req.client_data_unit = 0; pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; - WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); - - if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { - pkg.wqe_req.sgl = pkg.sgl_array; - } else { - pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, - sizeof(struct gdma_sge), - GFP_ATOMIC); - if (!pkg.sgl_ptr) - goto tx_drop_count; - - pkg.wqe_req.sgl = pkg.sgl_ptr; - } if (skb->protocol == htons(ETH_P_IP)) ipv4 = true; @@ -229,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) ipv6 = true; if (skb_is_gso(skb)) { + int num_sge; + + gso_hs = mana_get_gso_hs(skb); + + num_sge = mana_fix_skb_head(ndev, skb, gso_hs); + if (num_sge > 0) + pkg.wqe_req.num_sge = num_sge; + else + goto tx_drop_count; + + u64_stats_update_begin(&tx_stats->syncp); + if (skb->encapsulation) { + tx_stats->tso_inner_packets++; + tx_stats->tso_inner_bytes += skb->len - gso_hs; + } else { + tx_stats->tso_packets++; + tx_stats->tso_bytes += skb->len - gso_hs; + } + u64_stats_update_end(&tx_stats->syncp); + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; @@ -252,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - - if (skb->encapsulation) { - ihs = skb_inner_tcp_all_headers(skb); - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_inner_packets++; - tx_stats->tso_inner_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } else { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { - ihs = skb_transport_offset(skb) + sizeof(struct udphdr); - } else { - ihs = skb_tcp_all_headers(skb); - if (ipv6_has_hopopt_jumbo(skb)) - ihs -= sizeof(struct hop_jumbo_hdr); - } - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_packets++; - tx_stats->tso_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { csum_type = mana_checksum_info(skb); @@ -296,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct 
net_device *ndev) } else { /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) - goto free_sgl_ptr; + goto tx_drop_count; } } - if (mana_map_skb(skb, apc, &pkg)) { + WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); + + if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { + pkg.wqe_req.sgl = pkg.sgl_array; + } else { + pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, + sizeof(struct gdma_sge), + GFP_ATOMIC); + if (!pkg.sgl_ptr) + goto tx_drop_count; + + pkg.wqe_req.sgl = pkg.sgl_ptr; + } + + if (mana_map_skb(skb, apc, &pkg, gso_hs)) { u64_stats_update_begin(&tx_stats->syncp); tx_stats->mana_map_err++; u64_stats_update_end(&tx_stats->syncp); @@ -1258,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct device *dev = gc->dev; - int i; + int hsg, i; - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + /* Number of SGEs of linear part */ + hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; - for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + for (i = 0; i < hsg; i++) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); + + for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); } @@ -1317,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq) case CQE_TX_VPORT_IDX_OUT_OF_RANGE: case CQE_TX_VPORT_DISABLED: case CQE_TX_VLAN_TAGGING_VIOLATION: - WARN_ONCE(1, "TX: CQE error %d: ignored.\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: CQE error %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_err++; break; default: - /* If the CQE type is unexpected, log an error, assert, - * and go through the error path. + /* If the CQE type is unknown, log an error, + * and still free the SKB, update tail, etc. 
*/ - WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: unknown CQE type %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_unknown_type++; - return; + break; } if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 0bfc375161ed..a174c6fc626a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -110,9 +110,9 @@ struct qed_ll2_info { enum core_tx_dest tx_dest; u8 tx_stats_en; bool main_func_queue; + struct qed_ll2_cbs cbs; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; - struct qed_ll2_cbs cbs; }; extern const struct qed_ll2_ops qed_ll2_ops_pass; diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index ea9186178091..fc01ad3f340d 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -4,6 +4,7 @@ * Copyright (C) 2022 Renesas Electronics Corporation */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/etherdevice.h> @@ -1049,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) static void rswitch_etha_enable_mii(struct rswitch_etha *etha) { rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, - MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06)); + MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); } @@ -1693,6 +1694,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index) etha->index = index; etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; etha->coma_addr = priv->addr; + + /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. + * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply + * both the numerator and the denominator by 10. 
+ */ + etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; } static int rswitch_device_alloc(struct rswitch_private *priv, int index) @@ -1900,6 +1907,10 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&priv->lock); + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + attr = soc_device_match(rswitch_soc_no_speed_change); if (attr) priv->etha_no_runtime_change = true; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index f0c16a37ea55..04f49a7a5843 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -915,6 +915,7 @@ struct rswitch_etha { bool external_phy; struct mii_bus *mii; phy_interface_t phy_interface; + u32 psmcs; u8 mac_addr[MAX_ADDR_LEN]; int link; int speed; @@ -1012,6 +1013,7 @@ struct rswitch_private { struct rswitch_mfwd mfwd; spinlock_t lock; /* lock interrupt registers' control */ + struct clk *clk; bool etha_no_runtime_change; bool gwca_halt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 26ea8c687881..a0e276783e65 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -104,6 +104,7 @@ struct stm32_ops { int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); u32 syscfg_eth_mask; + bool clk_rx_enable_in_suspend; }; static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) @@ -121,7 +122,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) if (ret) return ret; - if (!dwmac->dev->power.is_suspended) { + if (!dwmac->ops->clk_rx_enable_in_suspend || + !dwmac->dev->power.is_suspended) { ret = clk_prepare_enable(dwmac->clk_rx); if (ret) { clk_disable_unprepare(dwmac->clk_tx); @@ -513,7 +515,8 @@ static struct stm32_ops stm32mp1_dwmac_data = { .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 83c567a89a46..ed1a5a31a491 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6002,33 +6002,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling receive - used by NETCONSOLE and other diagnostic tools - * to allow network I/O with interrupts disabled. - */ -static void stmmac_poll_controller(struct net_device *dev) -{ - struct stmmac_priv *priv = netdev_priv(dev); - int i; - - /* If adapter is down, do nothing */ - if (test_bit(STMMAC_DOWN, &priv->state)) - return; - - if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) { - for (i = 0; i < priv->plat->rx_queues_to_use; i++) - stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); - - for (i = 0; i < priv->plat->tx_queues_to_use; i++) - stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); - } else { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); - } -} -#endif - /** * stmmac_ioctl - Entry point for the Ioctl * @dev: Device pointer. 
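Note on the rswitch_etha_init() change above: MPIC.PSMCS is derived as clk / (MDC frequency * 2) - 1 with MDC fixed at 2.5 MHz, and the driver scales numerator and denominator by 10 so the whole computation stays in integer math. A minimal standalone sketch of that arithmetic, using a hypothetical 200 MHz peripheral clock (the rate is an assumed example, not taken from the patch):

#include <stdio.h>

/* PSMCS = clk / (MDC * 2) - 1, MDC = 2.5 MHz. Scaling both sides by 10
 * turns 2.5 MHz into the integer 25, and clk_hz / 100000 expresses the
 * clock in the matching 0.1 MHz units.
 */
int main(void)
{
	unsigned long clk_hz = 200000000UL;	/* assumed example clock rate */
	unsigned long psmcs = clk_hz / 100000 / (25 * 2) - 1;

	/* 200000000 / 100000 = 2000, 2000 / 50 = 40, 40 - 1 = 39 */
	printf("PSMCS = %lu\n", psmcs);
	return 0;
}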
@@ -6989,9 +6962,6 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_get_stats64 = stmmac_get_stats64, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = stmmac_poll_controller, -#endif .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 0f28795e581c..2f0678f15fb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -901,7 +901,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); int ret; - ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv); + ret = stmmac_pltfr_init(pdev, priv->plat); if (ret) return ret; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index bea6fc0f324c..24120605502f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1747,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) } tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { + if (tx_chn->irq < 0) { dev_err(dev, "Failed to get tx dma irq %d\n", tx_chn->irq); + ret = tx_chn->irq; goto err; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 410612f43cbd..4914d0ef58e9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -316,12 +316,12 @@ static int prueth_init_tx_chns(struct prueth_emac *emac) goto fail; } - tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { - ret = -EINVAL; + ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); + if (ret < 0) { netdev_err(ndev, "failed to get tx irq\n"); goto fail; } + tx_chn->irq = ret; snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d", dev_name(dev), tx_chn->id); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 5d6454fedb3f..78ad2da3ee29 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { + if (unlikely(ret < 4)) { + ret = ret < 0 ? 
ret : -ENODATA; + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 47c2ad7a3e42..fd50bb313b92 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -34,6 +34,8 @@ #define TDM_PPPOHT_SLIC_MAXIN #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S) +static int uhdlc_close(struct net_device *dev); + static struct ucc_tdm_info utdm_primary_info = { .uf_info = { .tsa = 0, @@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev) hdlc_device *hdlc = dev_to_hdlc(dev); struct ucc_hdlc_private *priv = hdlc->priv; struct ucc_tdm *utdm = priv->utdm; + int rc = 0; if (priv->hdlc_busy != 1) { if (request_irq(priv->ut_info->uf_info.irq, @@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev) napi_enable(&priv->napi); netdev_reset_queue(dev); netif_start_queue(dev); - hdlc_open(dev); + + rc = hdlc_open(dev); + if (rc) + uhdlc_close(dev); } - return 0; + return rc; } static void uhdlc_memclean(struct ucc_hdlc_private *priv) @@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev) netdev_reset_queue(dev); priv->hdlc_busy = 0; + hdlc_close(dev); + return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index bece26741d3a..611d1a6aabb9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le { * fixed parameter portion is assumed, otherwise * ssid in the fixed portion is ignored */ - __le16 channel_list[1]; /* list of chanspecs */ + union { + __le16 padding; /* Reserve space for at least 1 entry for abort + * which uses an on stack brcmf_scan_params_v2_le + */ + DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */ + }; }; struct brcmf_scan_results { @@ -702,7 +707,7 @@ struct brcmf_sta_info_le { struct brcmf_chanspec_list { __le32 count; /* # of entries */ - __le32 element[1]; /* variable length uint32 list */ + __le32 element[]; /* variable length uint32 list */ }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index f5e08988dc7b..06d6f7f66430 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr { struct iwl_fw_ini_error_dump_range { __le32 range_data_size; union { - __le32 internal_base_addr; - __le64 dram_base_addr; - __le32 page_num; + __le32 internal_base_addr __packed; + __le64 dram_base_addr __packed; + __le32 page_num __packed; struct iwl_fw_ini_fifo_hdr fifo_hdr; struct iwl_cmd_header fw_pkt_hdr; }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1f5db65a088d..1d5ee4330f29 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -802,7 +802,7 @@ out: mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = - (void *)((u8 *)mvm->nvm_data->channels + 1); + (void *)(mvm->nvm_data->channels + 1); mvm->nvm_data->bands[0].bitrates->hw_value = 10; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 8b6c641772ee..b719843e9457 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, mvmvif->associated = vif->cfg.assoc; - if (!(changes & BSS_CHANGED_ASSOC)) - return; - - if (vif->cfg.assoc) { - /* clear statistics to get clean beacon counter */ - iwl_mvm_request_statistics(mvm, true); - iwl_mvm_sf_update(mvm, vif, false); - iwl_mvm_power_vif_assoc(mvm, vif); - - for_each_mvm_vif_valid_link(mvmvif, i) { - memset(&mvmvif->link[i]->beacon_stats, 0, - sizeof(mvmvif->link[i]->beacon_stats)); + if (changes & BSS_CHANGED_ASSOC) { + if (vif->cfg.assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + iwl_mvm_sf_update(mvm, vif, false); + iwl_mvm_power_vif_assoc(mvm, vif); + + for_each_mvm_vif_valid_link(mvmvif, i) { + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); + + if (vif->p2p) { + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC, i); + } + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[i]); + if (link_conf && !link_conf->dtim_period) + protect = true; + rcu_read_unlock(); + } - if (vif->p2p) { - iwl_mvm_update_smps(mvm, vif, - IWL_MVM_SMPS_REQ_PROT, - IEEE80211_SMPS_DYNAMIC, i); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + protect) { + /* If we're not restarting and still haven't + * heard a beacon (dtim period unknown) then + * make sure we still have enough minimum time + * remaining in the time event, since the auth + * might actually have taken quite a while + * (especially for SAE) and so the remaining + * time could be small without us having heard + * a beacon yet. + */ + iwl_mvm_protect_assoc(mvm, vif, 0); } - rcu_read_lock(); - link_conf = rcu_dereference(vif->link_conf[i]); - if (link_conf && !link_conf->dtim_period) - protect = true; - rcu_read_unlock(); - } + iwl_mvm_sf_update(mvm, vif, false); + + /* FIXME: need to decide about misbehaving AP handling */ + iwl_mvm_power_vif_assoc(mvm, vif); + } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { + iwl_mvm_mei_host_disassociated(mvm); - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - protect) { - /* If we're not restarting and still haven't - * heard a beacon (dtim period unknown) then - * make sure we still have enough minimum time - * remaining in the time event, since the auth - * might actually have taken quite a while - * (especially for SAE) and so the remaining - * time could be small without us having heard - * a beacon yet. + /* If update fails - SF might be running in associated + * mode while disassociated - which is forbidden. */ - iwl_mvm_protect_assoc(mvm, vif, 0); + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), + "Failed to update SF upon disassociation\n"); + + /* If we get an assert during the connection (after the + * station has been added, but before the vif is set + * to associated), mac80211 will re-add the station and + * then configure the vif. Since the vif is not + * associated, we would remove the station here and + * this would fail the recovery. 
+ */ + iwl_mvm_mld_vif_delete_all_stas(mvm, vif); } - iwl_mvm_sf_update(mvm, vif, false); - - /* FIXME: need to decide about misbehaving AP handling */ - iwl_mvm_power_vif_assoc(mvm, vif); - } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { - iwl_mvm_mei_host_disassociated(mvm); - - /* If update fails - SF might be running in associated - * mode while disassociated - which is forbidden. - */ - ret = iwl_mvm_sf_update(mvm, vif, false); - WARN_ONCE(ret && - !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status), - "Failed to update SF upon disassociation\n"); - - /* If we get an assert during the connection (after the - * station has been added, but before the vif is set - * to associated), mac80211 will re-add the station and - * then configure the vif. Since the vif is not - * associated, we would remove the station here and - * this would fail the recovery. - */ - iwl_mvm_mld_vif_delete_all_stas(mvm, vif); + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); } - iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); + if (changes & BSS_CHANGED_PS) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } } static void diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index c1d9ce753468..3cbe2c0b8d6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm, if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; - if (version < 12) { + if (version < 16) { gp->scan_start_mac_or_link_id = scan_vif->id; } else { struct iwl_mvm_vif_link_info *link_info; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 36d70d589aed..898dca393643 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1612,6 +1612,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); + info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { @@ -1964,6 +1965,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, */ if (!is_flush) info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 391793a16adc..10690e82358b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", event_buf, len); - while (tlv_buf_left >= sizeof(*tlv_rxba)) { + while (tlv_buf_left > sizeof(*tlv_rxba)) { tlv_type = le16_to_cpu(tlv_rxba->header.type); tlv_len = le16_to_cpu(tlv_rxba->header.len); + if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) { + mwifiex_dbg(priv->adapter, WARN, + "TLV size (%zu) overflows event_buf buf_left=%d\n", + size_add(sizeof(tlv_rxba->header), tlv_len), + tlv_buf_left); + return; + } + if (tlv_type != TLV_TYPE_RXBA_SYNC) { mwifiex_dbg(priv->adapter, ERROR, "Wrong TLV 
id=0x%x\n", tlv_type); @@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num); tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len); + if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) { + mwifiex_dbg(priv->adapter, WARN, + "TLV size (%zu) overflows event_buf buf_left=%d\n", + size_add(sizeof(*tlv_rxba), tlv_bitmap_len), + tlv_buf_left); + return; + } + mwifiex_dbg(priv->adapter, INFO, "%pM tid=%d seq_num=%d bitmap_len=%d\n", tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, @@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, } } - tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len); - tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba); + tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len); + tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len; tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp; } } diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index f2168fac95ed..8e6db904e5b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync { u8 reserved; __le16 seq_num; __le16 bitmap_len; - u8 bitmap[1]; + u8 bitmap[]; } __packed; struct chan_band_param_set { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index 65420ad67416..257737137cd7 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length); rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off; - if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) { + if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) + + rx_pkt_off > skb->len) { mwifiex_dbg(priv->adapter, ERROR, "wrong rx packet offset: len=%d, rx_pkt_off=%d\n", skb->len, rx_pkt_off); @@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, return -1; } - if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, - sizeof(bridge_tunnel_header))) || - (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, - sizeof(rfc1042_header)) && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { + if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len && + ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, + sizeof(bridge_tunnel_header))) || + (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, + sizeof(rfc1042_header)) && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) { /* * Replace the 803 header and rfc1042 header (llc/snap) with an * EthernetII header, keep the src/dst and snap_type diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 05d9ab3ce819..dc8f4e157eb2 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev) { struct mt76_txwi_cache *t = NULL; - spin_lock(&dev->wed_lock); + spin_lock_bh(&dev->wed_lock); if (!list_empty(&dev->rxwi_cache)) { t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, list); list_del(&t->list); } - spin_unlock(&dev->wed_lock); + spin_unlock_bh(&dev->wed_lock); return t; } @@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache 
*t) if (!t) return; - spin_lock(&dev->wed_lock); + spin_lock_bh(&dev->wed_lock); list_add(&t->list, &dev->rxwi_cache); - spin_unlock(&dev->wed_lock); + spin_unlock_bh(&dev->wed_lock); } EXPORT_SYMBOL_GPL(mt76_put_rxwi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index 0acabba2d1a5..5d402cf2951c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, s8 *lna_2g, s8 *lna_5g, struct ieee80211_channel *chan) { - u16 val; u8 lna; - val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); - if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) - *lna_2g = 0; - if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) - memset(lna_5g, 0, sizeof(s8) * 3); - if (chan->band == NL80211_BAND_2GHZ) lna = *lna_2g; else if (chan->hw_value <= 64) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index d5809408d1d3..8c01855885ce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) struct ieee80211_channel *chan = dev->mphy.chandef.chan; int channel = chan->hw_value; s8 lna_5g[3], lna_2g; - u8 lna; + bool use_lna; + u8 lna = 0; u16 val; if (chan->band == NL80211_BAND_2GHZ) @@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; - lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); + if (chan->band == NL80211_BAND_2GHZ) + use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G); + else + use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G); + + if (use_lna) + lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); + dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); } EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h index 3642a2c7f80c..2434e2480cbe 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h @@ -46,6 +46,7 @@ struct rtw8723du_efuse { u8 vender_id[2]; /* 0x100 */ u8 product_id[2]; /* 0x102 */ u8 usb_option; /* 0x104 */ + u8 res5[2]; /* 0x105 */ u8 mac_addr[ETH_ALEN]; /* 0x107 */ }; diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 0a3483e247a8..f63250c650ca 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -890,13 +890,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, { struct of_changeset_entry *ce; + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + ce = kzalloc(sizeof(*ce), GFP_KERNEL); if (!ce) return -ENOMEM; - if (WARN_ON(action >= ARRAY_SIZE(action_names))) - return -EINVAL; - /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index dfb6fb962fc7..a9a292d6d59b 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -45,8 +45,8 @@ struct target { /** * struct fragment - info about fragment nodes in overlay expanded device tree - * @target: target of the overlay operation * @overlay: pointer to the __overlay__ node + * @target: target of the overlay operation */ struct fragment { struct device_node *overlay; diff --git 
a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index e2f29404c84e..64420ecc24d1 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -43,7 +43,6 @@ #define PARF_PHY_REFCLK 0x4c #define PARF_CONFIG_BITS 0x50 #define PARF_DBI_BASE_ADDR 0x168 -#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3 0x16c /* Register offset specific to IP ver 2.3.3 */ #define PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PARF_AXI_MSTR_WR_ADDR_HALT 0x178 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 @@ -797,8 +796,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; - writel(SLV_ADDR_SPACE_SZ, - pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3); + writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE); val = readl(pcie->parf + PARF_PHY_CTRL); val &= ~PHY_TEST_PWR_DOWN; diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 2af64bcb7da3..51e3dd0ea5ab 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -657,30 +657,33 @@ void of_pci_make_dev_node(struct pci_dev *pdev) cset = kmalloc(sizeof(*cset), GFP_KERNEL); if (!cset) - goto failed; + goto out_free_name; of_changeset_init(cset); np = of_changeset_create_node(cset, ppnode, name); if (!np) - goto failed; - np->data = cset; + goto out_destroy_cset; ret = of_pci_add_properties(pdev, cset, np); if (ret) - goto failed; + goto out_free_node; ret = of_changeset_apply(cset); if (ret) - goto failed; + goto out_free_node; + np->data = cset; pdev->dev.of_node = np; kfree(name); return; -failed: - if (np) - of_node_put(np); +out_free_node: + of_node_put(np); +out_destroy_cset: + of_changeset_destroy(cset); + kfree(cset); +out_free_name: kfree(name); } #endif diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c index 710ec35ba4a1..c2c7334152bc 100644 --- a/drivers/pci/of_property.c +++ b/drivers/pci/of_property.c @@ -186,8 +186,8 @@ static int of_pci_prop_interrupts(struct pci_dev *pdev, static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { + u32 i, addr_sz[OF_PCI_MAX_INT_PIN] = { 0 }, map_sz = 0; struct of_phandle_args out_irq[OF_PCI_MAX_INT_PIN]; - u32 i, addr_sz[OF_PCI_MAX_INT_PIN], map_sz = 0; __be32 laddr[OF_PCI_ADDRESS_CELLS] = { 0 }; u32 int_map_mask[] = { 0xffff00, 0, 0, 7 }; struct device_node *pnode; @@ -213,33 +213,44 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs, out_irq[i].args[0] = pin; ret = of_irq_parse_raw(laddr, &out_irq[i]); if (ret) { - pci_err(pdev, "parse irq %d failed, ret %d", pin, ret); + out_irq[i].np = NULL; + pci_dbg(pdev, "parse irq %d failed, ret %d", pin, ret); continue; } - ret = of_property_read_u32(out_irq[i].np, "#address-cells", - &addr_sz[i]); - if (ret) - addr_sz[i] = 0; + of_property_read_u32(out_irq[i].np, "#address-cells", + &addr_sz[i]); } list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { i = pci_swizzle_interrupt_pin(child, pin) - 1; + if (!out_irq[i].np) + continue; map_sz += 5 + addr_sz[i] + out_irq[i].args_count; } } + /* + * Parsing interrupt failed for all pins. In this case, it does not + * need to generate interrupt-map property. 
+ */ + if (!map_sz) + return 0; + int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL); mapp = int_map; list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { + i = pci_swizzle_interrupt_pin(child, pin) - 1; + if (!out_irq[i].np) + continue; + *mapp = (child->bus->number << 16) | (child->devfn << 8); mapp += OF_PCI_ADDRESS_CELLS; *mapp = pin; mapp++; - i = pci_swizzle_interrupt_pin(child, pin) - 1; *mapp = out_irq[i].np->phandle; mapp++; if (addr_sz[i]) { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a79c110c7e51..51ec9e7e784f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -572,7 +572,19 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev) static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev) { - pci_bridge_wait_for_secondary_bus(pci_dev, "resume"); + int ret; + + ret = pci_bridge_wait_for_secondary_bus(pci_dev, "resume"); + if (ret) { + /* + * The downstream link failed to come up, so mark the + * devices below as disconnected to make sure we don't + * attempt to resume them. + */ + pci_walk_bus(pci_dev->subordinate, pci_dev_set_disconnected, + NULL); + return; + } /* * When powering on a bridge from D3cold, the whole hierarchy may be diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 913dc04b3a40..6b50bc551984 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -1972,7 +1972,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) u64 delta; int i; - for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) { + for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) { if (status & (1U << i)) { ret = IRQ_HANDLED; if (WARN_ON(!dtc->counters[i])) diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index e9dc9638120a..e2f7519bef04 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1022,17 +1022,20 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev, static struct pinctrl *find_pinctrl(struct device *dev) { - struct pinctrl *p; + struct pinctrl *entry, *p = NULL; mutex_lock(&pinctrl_list_mutex); - list_for_each_entry(p, &pinctrl_list, node) - if (p->dev == dev) { - mutex_unlock(&pinctrl_list_mutex); - return p; + + list_for_each_entry(entry, &pinctrl_list, node) { + if (entry->dev == dev) { + p = entry; + kref_get(&p->users); + break; } + } mutex_unlock(&pinctrl_list_mutex); - return NULL; + return p; } static void pinctrl_free(struct pinctrl *p, bool inlist); @@ -1140,7 +1143,6 @@ struct pinctrl *pinctrl_get(struct device *dev) p = find_pinctrl(dev); if (p) { dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n"); - kref_get(&p->users); return p; } diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c index 2d1c1652cfd9..8a9961ac8712 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c +++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c @@ -1062,13 +1062,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev, if (ret < 0) return ret; - gpio = &pctrl->gpio_bank[reg]; - gpio->pctrl = pctrl; - if (reg >= WPCM450_NUM_BANKS) return dev_err_probe(dev, -EINVAL, "GPIO index %d out of range!\n", reg); + gpio = &pctrl->gpio_bank[reg]; + gpio->pctrl = pctrl; + bank = &wpcm450_banks[reg]; gpio->bank = bank; diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h index efb25fc34f14..1ac49ae638de 100644 --- a/drivers/pinctrl/pinctrl-lantiq.h +++ b/drivers/pinctrl/pinctrl-lantiq.h @@ -198,5 +198,4 @@ enum ltq_pin { extern 
int ltq_pinctrl_register(struct platform_device *pdev, struct ltq_pinmux_info *info); -extern int ltq_pinctrl_unregister(struct platform_device *pdev); #endif /* __PINCTRL_LANTIQ_H */ diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig index 77730dc548ed..c8d519ca53eb 100644 --- a/drivers/pinctrl/renesas/Kconfig +++ b/drivers/pinctrl/renesas/Kconfig @@ -235,6 +235,7 @@ config PINCTRL_RZN1 depends on OF depends on ARCH_RZN1 || COMPILE_TEST select GENERIC_PINCONF + select PINMUX help This selects pinctrl driver for Renesas RZ/N1 devices. diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c index 4bfe3aa57f8a..cf42e204cbf0 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c @@ -31,6 +31,8 @@ #define JH7110_AON_NGPIO 4 #define JH7110_AON_GC_BASE 64 +#define JH7110_AON_REGS_NUM 37 + /* registers */ #define JH7110_AON_DOEN 0x0 #define JH7110_AON_DOUT 0x4 @@ -145,6 +147,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_aon_pinctrl_info = { .gpi_mask = GENMASK(3, 0), .gpioin_reg_base = JH7110_AON_GPIOIN, .irq_reg = &jh7110_aon_irq_reg, + .nsaved_regs = JH7110_AON_REGS_NUM, .jh7110_set_one_pin_mux = jh7110_aon_set_one_pin_mux, .jh7110_get_padcfg_base = jh7110_aon_get_padcfg_base, .jh7110_gpio_irq_handler = jh7110_aon_irq_handler, @@ -165,6 +168,7 @@ static struct platform_driver jh7110_aon_pinctrl_driver = { .driver = { .name = "starfive-jh7110-aon-pinctrl", .of_match_table = jh7110_aon_pinctrl_of_match, + .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops), }, }; module_platform_driver(jh7110_aon_pinctrl_driver); diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c index 20c85db1cd3a..03c2ad808d61 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c @@ -31,6 +31,8 @@ #define JH7110_SYS_NGPIO 64 #define JH7110_SYS_GC_BASE 0 +#define JH7110_SYS_REGS_NUM 174 + /* registers */ #define JH7110_SYS_DOEN 0x000 #define JH7110_SYS_DOUT 0x040 @@ -417,6 +419,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_sys_pinctrl_info = { .gpi_mask = GENMASK(6, 0), .gpioin_reg_base = JH7110_SYS_GPIOIN, .irq_reg = &jh7110_sys_irq_reg, + .nsaved_regs = JH7110_SYS_REGS_NUM, .jh7110_set_one_pin_mux = jh7110_sys_set_one_pin_mux, .jh7110_get_padcfg_base = jh7110_sys_get_padcfg_base, .jh7110_gpio_irq_handler = jh7110_sys_irq_handler, @@ -437,6 +440,7 @@ static struct platform_driver jh7110_sys_pinctrl_driver = { .driver = { .name = "starfive-jh7110-sys-pinctrl", .of_match_table = jh7110_sys_pinctrl_of_match, + .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops), }, }; module_platform_driver(jh7110_sys_pinctrl_driver); diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c index b9081805c8f6..640f827a9b2c 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c @@ -872,6 +872,13 @@ int jh7110_pinctrl_probe(struct platform_device *pdev) if (!sfp) return -ENOMEM; +#if IS_ENABLED(CONFIG_PM_SLEEP) + sfp->saved_regs = devm_kcalloc(dev, info->nsaved_regs, + sizeof(*sfp->saved_regs), GFP_KERNEL); + if (!sfp->saved_regs) + return -ENOMEM; +#endif + sfp->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sfp->base)) return PTR_ERR(sfp->base); @@ -967,14 +974,45 @@ int 
jh7110_pinctrl_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "could not register gpiochip\n"); - irq_domain_set_pm_device(sfp->gc.irq.domain, dev); - dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", sfp->gc.ngpio); return pinctrl_enable(sfp->pctl); } EXPORT_SYMBOL_GPL(jh7110_pinctrl_probe); +static int jh7110_pinctrl_suspend(struct device *dev) +{ + struct jh7110_pinctrl *sfp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + raw_spin_lock_irqsave(&sfp->lock, flags); + for (i = 0 ; i < sfp->info->nsaved_regs ; i++) + sfp->saved_regs[i] = readl_relaxed(sfp->base + 4 * i); + + raw_spin_unlock_irqrestore(&sfp->lock, flags); + return 0; +} + +static int jh7110_pinctrl_resume(struct device *dev) +{ + struct jh7110_pinctrl *sfp = dev_get_drvdata(dev); + unsigned long flags; + unsigned int i; + + raw_spin_lock_irqsave(&sfp->lock, flags); + for (i = 0 ; i < sfp->info->nsaved_regs ; i++) + writel_relaxed(sfp->saved_regs[i], sfp->base + 4 * i); + + raw_spin_unlock_irqrestore(&sfp->lock, flags); + return 0; +} + +const struct dev_pm_ops jh7110_pinctrl_pm_ops = { + LATE_SYSTEM_SLEEP_PM_OPS(jh7110_pinctrl_suspend, jh7110_pinctrl_resume) +}; +EXPORT_SYMBOL_GPL(jh7110_pinctrl_pm_ops); + MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC"); MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>"); MODULE_AUTHOR("Jianlong Huang <jianlong.huang@starfivetech.com>"); diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h index 3f20b7ff96dd..a33d0d4e1382 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h @@ -21,6 +21,7 @@ struct jh7110_pinctrl { /* register read/write mutex */ struct mutex mutex; const struct jh7110_pinctrl_soc_info *info; + u32 *saved_regs; }; struct jh7110_gpio_irq_reg { @@ -50,6 +51,8 @@ struct jh7110_pinctrl_soc_info { const struct jh7110_gpio_irq_reg *irq_reg; + unsigned int nsaved_regs; + /* generic pinmux */ int (*jh7110_set_one_pin_mux)(struct jh7110_pinctrl *sfp, unsigned int pin, @@ -66,5 +69,6 @@ void jh7110_set_gpiomux(struct jh7110_pinctrl *sfp, unsigned int pin, unsigned int din, u32 dout, u32 doen); int jh7110_pinctrl_probe(struct platform_device *pdev); struct jh7110_pinctrl *jh7110_from_irq_desc(struct irq_desc *desc); +extern const struct dev_pm_ops jh7110_pinctrl_pm_ops; #endif /* __PINCTRL_STARFIVE_JH7110_H__ */ diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index cfeda5b3e048..734c71ef005b 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -96,7 +96,6 @@ static const struct cfg_param { {"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING}, {"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING}, {"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE}, - {"nvidia,function", TEGRA_PINCONF_PARAM_FUNCTION}, }; static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, @@ -471,12 +470,6 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx, *bit = g->drvtype_bit; *width = 2; break; - case TEGRA_PINCONF_PARAM_FUNCTION: - *bank = g->mux_bank; - *reg = g->mux_reg; - *bit = g->mux_bit; - *width = 2; - break; default: dev_err(pmx->dev, "Invalid config param %04x\n", param); return -ENOTSUPP; @@ -640,16 +633,8 @@ static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, val >>= bit; val &= (1 << width) - 1; - if (cfg_params[i].param == 
TEGRA_PINCONF_PARAM_FUNCTION) { - u8 idx = pmx->soc->groups[group].funcs[val]; - - seq_printf(s, "\n\t%s=%s", - strip_prefix(cfg_params[i].property), - pmx->functions[idx].name); - } else { - seq_printf(s, "\n\t%s=%u", - strip_prefix(cfg_params[i].property), val); - } + seq_printf(s, "\n\t%s=%u", + strip_prefix(cfg_params[i].property), val); } } diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h index e728efeaa4de..b3289bdf727d 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.h +++ b/drivers/pinctrl/tegra/pinctrl-tegra.h @@ -54,8 +54,6 @@ enum tegra_pinconf_param { TEGRA_PINCONF_PARAM_SLEW_RATE_RISING, /* argument: Integer, range is HW-dependant */ TEGRA_PINCONF_PARAM_DRIVE_TYPE, - /* argument: pinmux settings */ - TEGRA_PINCONF_PARAM_FUNCTION, }; enum tegra_pinconf_pull { diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index f3696a54a2bd..fd38d8c8371e 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -53,7 +53,7 @@ struct mlxbf_tmfifo; /** - * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring + * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring * @va: virtual address of the ring * @dma: dma address of the ring * @vq: pointer to the virtio virtqueue @@ -113,12 +113,13 @@ enum { }; /** - * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device + * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device * @vdev: virtio device, in which the vdev.id.device field has the * VIRTIO_ID_xxx id to distinguish the virtual device. * @status: status of the device * @features: supported features of the device * @vrings: array of tmfifo vrings of this device + * @config: non-anonymous union for cons and net * @config.cons: virtual console config - * select if vdev.id.device is VIRTIO_ID_CONSOLE * @config.net: virtual network config - @@ -138,7 +139,7 @@ struct mlxbf_tmfifo_vdev { }; /** - * mlxbf_tmfifo_irq_info - Structure of the interrupt information + * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information * @fifo: pointer to the tmfifo structure * @irq: interrupt number * @index: index into the interrupt array @@ -150,7 +151,7 @@ struct mlxbf_tmfifo_irq_info { }; /** - * mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx) + * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx) * @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL) * @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS) * @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA) @@ -162,7 +163,7 @@ struct mlxbf_tmfifo_io { }; /** - * mlxbf_tmfifo - Structure of the TmFifo + * struct mlxbf_tmfifo - Structure of the TmFifo * @vdev: array of the virtual devices running over the TmFifo * @lock: lock to protect the TmFifo access * @res0: mapped resource block 0 @@ -198,7 +199,7 @@ struct mlxbf_tmfifo { }; /** - * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header + * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header * @type: message type * @len: payload length in network byte order. Messages sent into the FIFO * will be read by the other side as data stream in the same byte order. 
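The mlxbf-tmfifo hunks above fix kernel-doc comments: scripts/kernel-doc expects a struct comment to open with the literal struct keyword and a short description, to carry one @name: line per public member, and it honours a private: marker for fields that need no documentation. A minimal sketch of the expected shape, modeled on the message header shown above (the name example_hdr is illustrative, not from the driver):

#include <linux/types.h>

/**
 * struct example_hdr - sketch of a well-formed kernel-doc comment
 * @type: message type
 * @len: payload length in network byte order
 *
 * The private: marker below tells scripts/kernel-doc to stop expecting
 * @-lines, so padding fields can stay undocumented without a warning.
 */
struct example_hdr {
	u8 type;
	__be16 len;
	/* private: */
	u8 unused[5];
};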
@@ -208,6 +209,7 @@ struct mlxbf_tmfifo { struct mlxbf_tmfifo_msg_hdr { u8 type; __be16 len; + /* private: */ u8 unused[5]; } __packed __aligned(sizeof(u64)); diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 8c4f9e12f018..5798b49ddaba 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -659,7 +659,7 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type, const char *guid, int min_elements, int instance_id) { - struct kobject *attr_name_kobj; + struct kobject *attr_name_kobj, *duplicate; union acpi_object *elements; struct kset *temp_kset; @@ -704,8 +704,11 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type, } /* All duplicate attributes found are ignored */ - if (kset_find_obj(temp_kset, str_value)) { + duplicate = kset_find_obj(temp_kset, str_value); + if (duplicate) { pr_debug("Duplicate attribute name found - %s\n", str_value); + /* kset_find_obj() returns a reference */ + kobject_put(duplicate); goto pack_attr_exit; } @@ -768,7 +771,7 @@ static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type, const char *guid, int min_elements, int instance_id) { - struct kobject *attr_name_kobj; + struct kobject *attr_name_kobj, *duplicate; struct kset *temp_kset; char str[MAX_BUFF_SIZE]; @@ -794,8 +797,11 @@ static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type, temp_kset = bioscfg_drv.main_dir_kset; /* All duplicate attributes found are ignored */ - if (kset_find_obj(temp_kset, str)) { + duplicate = kset_find_obj(temp_kset, str); + if (duplicate) { pr_debug("Duplicate attribute name found - %s\n", str); + /* kset_find_obj() returns a reference */ + kobject_put(duplicate); goto buff_attr_exit; } diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index e76e5458db35..8ebb7be52ee7 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -1548,7 +1548,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = { .restore = hp_wmi_resume_handler, }; -static struct platform_driver hp_wmi_driver = { +/* + * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver hp_wmi_driver __refdata = { .driver = { .name = "hp-wmi", .pm = &hp_wmi_pm_ops, diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 1061eb7ec399..43c864add778 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -331,14 +331,15 @@ int do_core_test(int cpu, struct device *dev) switch (test->test_num) { case IFS_TYPE_SAF: if (!ifsd->loaded) - return -EPERM; - ifs_test_core(cpu, dev); + ret = -EPERM; + else + ifs_test_core(cpu, dev); break; case IFS_TYPE_ARRAY_BIST: ifs_array_test_core(cpu, dev); break; default: - return -EINVAL; + ret = -EINVAL; } out: cpus_read_unlock(); diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 79346881cadb..aee869769843 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -1248,6 +1248,24 @@ static void tlmi_release_attr(void) kset_unregister(tlmi_priv.authentication_kset); } +static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name) +{ + struct kobject *duplicate; + + if (!strcmp(name, "Reserved")) + return -EINVAL; + + duplicate = kset_find_obj(attribute_kset, name); + if (duplicate) { + pr_debug("Duplicate attribute name found - %s\n", name); + /* kset_find_obj() returns a reference */ + kobject_put(duplicate); + return -EBUSY; + } + + return 0; +} + static int tlmi_sysfs_init(void) { int i, ret; @@ -1276,10 +1294,8 @@ static int tlmi_sysfs_init(void) continue; /* check for duplicate or reserved values */ - if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) || - !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) { - pr_debug("duplicate or reserved attribute name found - %s\n", - tlmi_priv.setting[i]->display_name); + if (tlmi_validate_setting_name(tlmi_priv.attribute_kset, + tlmi_priv.setting[i]->display_name) < 0) { kfree(tlmi_priv.setting[i]->possible_values); kfree(tlmi_priv.setting[i]); tlmi_priv.setting[i] = NULL; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index f9301a9382e7..0c6733772698 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -42,6 +42,21 @@ static const struct ts_dmi_data archos_101_cesium_educ_data = { .properties = archos_101_cesium_educ_props, }; +static const struct property_entry bush_bush_windows_tablet_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1850), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"), + { } +}; + +static const struct ts_dmi_data bush_bush_windows_tablet_data = { + .acpi_name = "MSSL1680:00", + .properties = bush_bush_windows_tablet_props, +}; + static const struct property_entry chuwi_hi8_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -756,6 +771,21 @@ static const struct ts_dmi_data pipo_w11_data = { .properties = pipo_w11_props, }; +static const struct property_entry positivo_c4128b_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 4), + PROPERTY_ENTRY_U32("touchscreen-min-y", 13), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1915), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1269), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"), + 
PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data positivo_c4128b_data = { + .acpi_name = "MSSL1680:00", + .properties = positivo_c4128b_props, +}; + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 32), PROPERTY_ENTRY_U32("touchscreen-min-y", 16), @@ -1071,6 +1101,13 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Bush Windows tablet */ + .driver_data = (void *)&bush_bush_windows_tablet_data, + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "Bush Windows tablet"), + }, + }, + { /* Chuwi Hi8 */ .driver_data = (void *)&chuwi_hi8_data, .matches = { @@ -1481,6 +1518,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Positivo C4128B */ + .driver_data = (void *)&positivo_c4128b_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"), + DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"), + }, + }, + { /* Point of View mobii wintab p800w (v2.0) */ .driver_data = (void *)&pov_mobii_wintab_p800w_v20_data, .matches = { diff --git a/drivers/pmdomain/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c index 2f693b67ddb4..891c1d925a9d 100644 --- a/drivers/pmdomain/imx/scu-pd.c +++ b/drivers/pmdomain/imx/scu-pd.c @@ -150,7 +150,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { { "mclk-out-1", IMX_SC_R_MCLK_OUT_1, 1, false, 0 }, { "dma0-ch", IMX_SC_R_DMA_0_CH0, 32, true, 0 }, { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 }, - { "dma2-ch", IMX_SC_R_DMA_2_CH0, 32, true, 0 }, + { "dma2-ch-0", IMX_SC_R_DMA_2_CH0, 5, true, 0 }, + { "dma2-ch-1", IMX_SC_R_DMA_2_CH5, 27, true, 0 }, { "dma3-ch", IMX_SC_R_DMA_3_CH0, 32, true, 0 }, { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 }, { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 }, diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 59e1ebb7842e..411e00b255d6 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -300,7 +300,7 @@ config NVMEM_REBOOT_MODE config POWER_MLXBF tristate "Mellanox BlueField power handling driver" - depends on (GPIO_MLXBF2 && ACPI) + depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI help This driver supports reset or low power mode handling for Mellanox BlueField. diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c index 12dedf841a44..de35d24bb7ef 100644 --- a/drivers/power/reset/pwr-mlxbf.c +++ b/drivers/power/reset/pwr-mlxbf.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause /* * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
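The touchscreen_dmi additions above each pair a dmi_system_id match with a ts_dmi_data payload (ACPI name plus firmware properties); at probe time the driver finds the entry for the running machine and attaches those properties to the matching touchscreen device. A hedged sketch of that lookup, with apply_quirk_properties() standing in as a hypothetical helper for the real attach step:

#include <linux/dmi.h>

/* Sketch only: locate this machine's quirk entry and hand its payload to
 * a hypothetical helper; the real driver attaches the properties to the
 * matching i2c/ACPI device instead.
 */
static void example_ts_dmi_lookup(void)
{
	const struct dmi_system_id *match;
	const struct ts_dmi_data *quirk;

	match = dmi_first_match(touchscreen_dmi_table);
	if (!match)
		return;	/* no quirk registered for this machine */

	quirk = match->driver_data;
	apply_quirk_properties(quirk->acpi_name, quirk->properties);	/* hypothetical */
}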
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index 447ffdacddf9..17064d7b19f6 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -121,7 +121,7 @@ static int vexpress_reset_probe(struct platform_device *pdev) return PTR_ERR(regmap); dev_set_drvdata(&pdev->dev, regmap); - switch ((enum vexpress_reset_func)match->data) { + switch ((uintptr_t)match->data) { case FUNC_SHUTDOWN: vexpress_power_off_device = &pdev->dev; pm_power_off = vexpress_power_off; diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 663a1c423806..a61bb1283e19 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -769,6 +769,7 @@ config BATTERY_RT5033 config CHARGER_RT5033 tristate "RT5033 battery charger support" depends on MFD_RT5033 + depends on EXTCON || !EXTCON help This adds support for battery charger in Richtek RT5033 PMIC. The device supports pre-charge mode, fast charge mode and diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c index 6f83e99d2eb7..ce36d6ca3422 100644 --- a/drivers/power/supply/ab8500_btemp.c +++ b/drivers/power/supply/ab8500_btemp.c @@ -115,7 +115,6 @@ struct ab8500_btemp { static enum power_supply_property ab8500_btemp_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_TEMP, }; @@ -532,12 +531,6 @@ static int ab8500_btemp_get_property(struct power_supply *psy, else val->intval = 1; break; - case POWER_SUPPLY_PROP_TECHNOLOGY: - if (di->bm->bi) - val->intval = di->bm->bi->technology; - else - val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN; - break; case POWER_SUPPLY_PROP_TEMP: val->intval = ab8500_btemp_get_temp(di); break; @@ -662,7 +655,7 @@ static char *supply_interface[] = { static const struct power_supply_desc ab8500_btemp_desc = { .name = "ab8500_btemp", - .type = POWER_SUPPLY_TYPE_BATTERY, + .type = POWER_SUPPLY_TYPE_UNKNOWN, .properties = ab8500_btemp_props, .num_properties = ARRAY_SIZE(ab8500_btemp_props), .get_property = ab8500_btemp_get_property, diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c index ea4ad61d4c7e..2205ea0834a6 100644 --- a/drivers/power/supply/ab8500_chargalg.c +++ b/drivers/power/supply/ab8500_chargalg.c @@ -1720,7 +1720,7 @@ static char *supply_interface[] = { static const struct power_supply_desc ab8500_chargalg_desc = { .name = "ab8500_chargalg", - .type = POWER_SUPPLY_TYPE_BATTERY, + .type = POWER_SUPPLY_TYPE_UNKNOWN, .properties = ab8500_chargalg_props, .num_properties = ARRAY_SIZE(ab8500_chargalg_props), .get_property = ab8500_chargalg_get_property, diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c index f27dae5043f5..a9641bd3d8cf 100644 --- a/drivers/power/supply/mt6370-charger.c +++ b/drivers/power/supply/mt6370-charger.c @@ -324,7 +324,7 @@ static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv) if (fl_strobe) { dev_err(priv->dev, "Flash led is still in strobe mode\n"); - return ret; + return -EINVAL; } /* cfo off */ diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 06e5b6b0e255..d483a81560ab 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -482,6 +482,13 @@ int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env) if (ret) return ret; + /* + * Kernel generates KOBJ_REMOVE uevent in device 
removal path, after + * resources have been freed. Exit early to avoid use-after-free. + */ + if (psy->removing) + return 0; + prop_buf = (char *)get_zeroed_page(GFP_KERNEL); if (!prop_buf) return -ENOMEM; diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c index 8328bcea1a29..f64daf5a41d9 100644 --- a/drivers/power/supply/rk817_charger.c +++ b/drivers/power/supply/rk817_charger.c @@ -1045,6 +1045,13 @@ static void rk817_charging_monitor(struct work_struct *work) queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000)); } +static void rk817_cleanup_node(void *data) +{ + struct device_node *node = data; + + of_node_put(node); +} + static int rk817_charger_probe(struct platform_device *pdev) { struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent); @@ -1061,11 +1068,13 @@ static int rk817_charger_probe(struct platform_device *pdev) if (!node) return -ENODEV; + ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node); + if (ret) + return ret; + charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL); - if (!charger) { - of_node_put(node); + if (!charger) return -ENOMEM; - } charger->rk808 = rk808; @@ -1211,3 +1220,4 @@ MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC"); MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>"); MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rk817-charger"); diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c index 683adb18253d..fdfdc83ab045 100644 --- a/drivers/power/supply/rt9467-charger.c +++ b/drivers/power/supply/rt9467-charger.c @@ -598,8 +598,8 @@ static int rt9467_run_aicl(struct rt9467_chg_data *data) reinit_completion(&data->aicl_done); ret = wait_for_completion_timeout(&data->aicl_done, msecs_to_jiffies(3500)); - if (ret) - return ret; + if (ret == 0) + return -ETIMEDOUT; ret = rt9467_get_value_from_ranges(data, F_IAICR, RT9467_RANGE_IAICR, &aicr_get); if (ret) { diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c index 954feba6600b..7970843a4f48 100644 --- a/drivers/power/supply/ucs1002_power.c +++ b/drivers/power/supply/ucs1002_power.c @@ -384,7 +384,8 @@ static int ucs1002_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_USB_TYPE: return ucs1002_get_usb_type(info, val); case POWER_SUPPLY_PROP_HEALTH: - return val->intval = info->health; + val->intval = info->health; + return 0; case POWER_SUPPLY_PROP_PRESENT: val->intval = info->present; return 0; diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 20a974ced8d6..a7a6947ab4bc 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -3998,7 +3998,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev) return 0; out: - ptp_ocp_dev_release(&bp->dev); put_device(&bp->dev); return err; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d8e1caaf207e..3137e40fcd3e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5542,6 +5542,8 @@ regulator_register(struct device *dev, goto rinse; } device_initialize(&rdev->dev); + dev_set_drvdata(&rdev->dev, rdev); + rdev->dev.class = ®ulator_class; spin_lock_init(&rdev->err_lock); /* @@ -5603,11 +5605,9 @@ regulator_register(struct device *dev, rdev->supply_name = regulator_desc->supply_name; /* register with sysfs */ - rdev->dev.class = ®ulator_class; rdev->dev.parent = config->dev; dev_set_name(&rdev->dev, "regulator.%lu", (unsigned long) 
atomic_inc_return(®ulator_no)); - dev_set_drvdata(&rdev->dev, rdev); /* set regulator constraints */ if (init_data) @@ -5724,15 +5724,11 @@ wash: mutex_lock(®ulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(®ulator_list_mutex); - put_device(&rdev->dev); - rdev = NULL; clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - if (rdev && rdev->dev.of_node) - of_node_put(rdev->dev.of_node); - kfree(rdev); kfree(config); + put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c index b9cda2210c33..65fbd95f1dbb 100644 --- a/drivers/regulator/mt6358-regulator.c +++ b/drivers/regulator/mt6358-regulator.c @@ -43,7 +43,7 @@ struct mt6358_regulator_info { .desc = { \ .name = #vreg, \ .of_match = of_match_ptr(match), \ - .ops = &mt6358_volt_range_ops, \ + .ops = &mt6358_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MT6358_ID_##vreg, \ .owner = THIS_MODULE, \ @@ -139,7 +139,7 @@ struct mt6358_regulator_info { .desc = { \ .name = #vreg, \ .of_match = of_match_ptr(match), \ - .ops = &mt6358_volt_range_ops, \ + .ops = &mt6358_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MT6366_ID_##vreg, \ .owner = THIS_MODULE, \ @@ -450,7 +450,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev) } } -static const struct regulator_ops mt6358_volt_range_ops = { +static const struct regulator_ops mt6358_buck_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, @@ -464,6 +464,18 @@ static const struct regulator_ops mt6358_volt_range_ops = { .get_mode = mt6358_regulator_get_mode, }; +static const struct regulator_ops mt6358_volt_range_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = mt6358_get_buck_voltage_sel, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .get_status = mt6358_get_status, +}; + static const struct regulator_ops mt6358_volt_table_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_iterate, diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index df782646e856..ab2f35bc294d 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (port) { put_device(&port->dev); retval = -EEXIST; - goto err_out; + goto err_put; } port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) - goto err_out; + goto err_put; rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); @@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { kfree(port); - goto err_out; + goto err_put; } retval = -EINVAL; @@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, return port; -err_out: +err_put: zfcp_ccw_adapter_put(adapter); +err_out: return ERR_PTR(retval); } diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 93c68931a593..22cef283b2b9 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -27,7 +27,7 @@ #define DRV_NAME "fnic" #define 
DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.56" +#define DRV_VERSION "1.6.0.57" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -237,6 +237,8 @@ struct fnic { unsigned int cq_count; struct mutex sgreset_mutex; + spinlock_t sgreset_lock; /* lock for sgreset */ + struct scsi_cmnd *sgreset_sc; struct dentry *fnic_stats_debugfs_host; struct dentry *fnic_stats_debugfs_file; struct dentry *fnic_reset_debugfs_file; diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index f4c8769df312..5895ead20e14 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -52,6 +52,8 @@ struct fnic_io_req { unsigned long start_time; /* in jiffies */ struct completion *abts_done; /* completion for abts */ struct completion *dr_done; /* completion for device reset */ + unsigned int tag; + struct scsi_cmnd *sc; /* midlayer's cmd pointer */ }; enum fnic_port_speeds { diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 984bc5fc55e2..f27f9319e0b2 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -754,6 +754,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < FNIC_IO_LOCKS; i++) spin_lock_init(&fnic->io_req_lock[i]); + spin_lock_init(&fnic->sgreset_lock); + err = -ENOMEM; fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); if (!fnic->io_req_pool) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 9761b2c9db48..416d81954819 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1047,9 +1047,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, { u8 type; u8 hdr_status; - struct fcpio_tag tag; + struct fcpio_tag ftag; u32 id; - struct scsi_cmnd *sc; + struct scsi_cmnd *sc = NULL; struct fnic_io_req *io_req; struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; @@ -1058,27 +1058,43 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned long flags; spinlock_t *io_lock; unsigned long start_time; + unsigned int tag; - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); - fcpio_tag_id_dec(&tag, &id); + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + fcpio_tag_id_dec(&ftag, &id); - if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { + tag = id & FNIC_TAG_MASK; + if (tag == fnic->fnic_max_tag_id) { + if (!(id & FNIC_TAG_DEV_RST)) { + shost_printk(KERN_ERR, fnic->lport->host, + "Tag out of range id 0x%x hdr status = %s\n", + id, fnic_fcpio_status_to_str(hdr_status)); + return; + } + } else if (tag > fnic->fnic_max_tag_id) { shost_printk(KERN_ERR, fnic->lport->host, - "Tag out of range tag %x hdr status = %s\n", - id, fnic_fcpio_status_to_str(hdr_status)); + "Tag out of range tag 0x%x hdr status = %s\n", + tag, fnic_fcpio_status_to_str(hdr_status)); return; } - sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { + sc = fnic->sgreset_sc; + io_lock = &fnic->sgreset_lock; + } else { + sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + io_lock = fnic_io_lock_hash(fnic, sc); + } + WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); shost_printk(KERN_ERR, fnic->lport->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", - fnic_fcpio_status_to_str(hdr_status), id); + fnic_fcpio_status_to_str(hdr_status), tag); return; } - io_lock = fnic_io_lock_hash(fnic, sc); + 
spin_lock_irqsave(io_lock, flags); io_req = fnic_priv(sc)->io_req; WARN_ON_ONCE(!io_req); @@ -1089,7 +1105,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, shost_printk(KERN_ERR, fnic->lport->host, "itmf_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", - fnic_fcpio_status_to_str(hdr_status), id, sc); + fnic_fcpio_status_to_str(hdr_status), tag, sc); return; } start_time = io_req->start_time; @@ -1938,6 +1954,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, struct scsi_lun fc_lun; int ret = 0; unsigned long intr_flags; + unsigned int tag = scsi_cmd_to_rq(sc)->tag; + + if (tag == SCSI_NO_TAG) + tag = io_req->tag; spin_lock_irqsave(host->host_lock, intr_flags); if (unlikely(fnic_chk_state_flags_locked(fnic, @@ -1964,7 +1984,8 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, /* fill in the lun info */ int_to_scsilun(sc->device->lun, &fc_lun); - fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST, + tag |= FNIC_TAG_DEV_RST; + fnic_queue_wq_copy_desc_itmf(wq, tag, 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); @@ -2146,8 +2167,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, .ret = SUCCESS, }; - if (new_sc) - iter_data.lr_sc = lr_sc; + iter_data.lr_sc = lr_sc; scsi_host_busy_iter(fnic->lport->host, fnic_pending_aborts_iter, &iter_data); @@ -2230,8 +2250,14 @@ int fnic_device_reset(struct scsi_cmnd *sc) mutex_lock(&fnic->sgreset_mutex); tag = fnic->fnic_max_tag_id; new_sc = 1; - } - io_lock = fnic_io_lock_hash(fnic, sc); + fnic->sgreset_sc = sc; + io_lock = &fnic->sgreset_lock; + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n", + rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag); + } else + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); io_req = fnic_priv(sc)->io_req; @@ -2247,6 +2273,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) } memset(io_req, 0, sizeof(*io_req)); io_req->port_id = rport->port_id; + io_req->tag = tag; + io_req->sc = sc; fnic_priv(sc)->io_req = io_req; } io_req->dr_done = &tm_done; @@ -2400,8 +2428,10 @@ fnic_device_reset_end: (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), fnic_flags_and_state(sc)); - if (new_sc) + if (new_sc) { + fnic->sgreset_sc = NULL; mutex_unlock(&fnic->sgreset_mutex); + } FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from device reset %s\n", diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index d0911bc28663..89367c4bf0ef 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -613,6 +613,17 @@ void scsi_cdl_check(struct scsi_device *sdev) bool cdl_supported; unsigned char *buf; + /* + * Support for CDL was defined in SPC-5. Ignore devices reporting an + * lower SPC version. This also avoids problems with old drives choking + * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a + * service action specified, as done in scsi_cdl_check_cmd(). + */ + if (sdev->scsi_level < SCSI_SPC_5) { + sdev->cdl_supported = 0; + return; + } + buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL); if (!buf) { sdev->cdl_supported = 0; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 52014b2d39e1..44680f65ea14 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -822,7 +822,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so * non-zero LUNs can be scanned. 
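
Note: several fixes in this stretch are about resource lifetime on error paths (rk817_charger, regulator_register, zfcp_port_enqueue, ptp_ocp). The rk817 change converts a manual of_node_put() into a devm action so the reference is dropped on every exit path. A minimal sketch of that pattern, assuming a hypothetical probe function and "charger" child node (names are made up):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static void example_put_node(void *data)
{
	of_node_put(data);	/* runs on probe failure and on driver detach */
}

static int example_probe(struct platform_device *pdev)
{
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(pdev->dev.parent->of_node, "charger");
	if (!node)
		return -ENODEV;

	/* From here on, every early return automatically drops the reference. */
	ret = devm_add_action_or_reset(&pdev->dev, example_put_node, node);
	if (ret)
		return ret;	/* the _or_reset variant already ran example_put_node() */

	/* ... rest of probe can return on error without extra cleanup ... */
	return 0;
}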
*/ - sdev->scsi_level = inq_result[2] & 0x07; + sdev->scsi_level = inq_result[2] & 0x0f; if (sdev->scsi_level >= 2 || (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) sdev->scsi_level++; @@ -1619,12 +1619,25 @@ int scsi_add_device(struct Scsi_Host *host, uint channel, } EXPORT_SYMBOL(scsi_add_device); -void scsi_rescan_device(struct scsi_device *sdev) +int scsi_rescan_device(struct scsi_device *sdev) { struct device *dev = &sdev->sdev_gendev; + int ret = 0; device_lock(dev); + /* + * Bail out if the device or its queue are not running. Otherwise, + * the rescan may block waiting for commands to be executed, with us + * holding the device lock. This can result in a potential deadlock + * in the power management core code when system resume is on-going. + */ + if (sdev->sdev_state != SDEV_RUNNING || + blk_queue_pm_only(sdev->request_queue)) { + ret = -EWOULDBLOCK; + goto unlock; + } + scsi_attach_vpd(sdev); scsi_cdl_check(sdev); @@ -1638,7 +1651,11 @@ void scsi_rescan_device(struct scsi_device *sdev) drv->rescan(dev); module_put(dev->driver->owner); } + +unlock: device_unlock(dev); + + return ret; } EXPORT_SYMBOL(scsi_rescan_device); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index c92a317ba547..83b6a3f3863b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -201,18 +201,32 @@ cache_type_store(struct device *dev, struct device_attribute *attr, } static ssize_t -manage_start_stop_show(struct device *dev, struct device_attribute *attr, - char *buf) +manage_start_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; - return sprintf(buf, "%u\n", sdp->manage_start_stop); + return sysfs_emit(buf, "%u\n", + sdp->manage_system_start_stop && + sdp->manage_runtime_start_stop); } +static DEVICE_ATTR_RO(manage_start_stop); static ssize_t -manage_start_stop_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +manage_system_start_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop); +} + +static ssize_t +manage_system_start_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -224,11 +238,42 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr, if (kstrtobool(buf, &v)) return -EINVAL; - sdp->manage_start_stop = v; + sdp->manage_system_start_stop = v; return count; } -static DEVICE_ATTR_RW(manage_start_stop); +static DEVICE_ATTR_RW(manage_system_start_stop); + +static ssize_t +manage_runtime_start_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop); +} + +static ssize_t +manage_runtime_start_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + bool v; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_runtime_start_stop = v; + + return count; +} +static DEVICE_ATTR_RW(manage_runtime_start_stop); static ssize_t allow_restart_show(struct 
device *dev, struct device_attribute *attr, char *buf) @@ -560,6 +605,8 @@ static struct attribute *sd_disk_attrs[] = { &dev_attr_FUA.attr, &dev_attr_allow_restart.attr, &dev_attr_manage_start_stop.attr, + &dev_attr_manage_system_start_stop.attr, + &dev_attr_manage_runtime_start_stop.attr, &dev_attr_protection_type.attr, &dev_attr_protection_mode.attr, &dev_attr_app_tag_own.attr, @@ -3694,7 +3741,8 @@ static int sd_remove(struct device *dev) device_del(&sdkp->disk_dev); del_gendisk(sdkp->disk); - sd_shutdown(dev); + if (!sdkp->suspended) + sd_shutdown(dev); put_disk(sdkp->disk); return 0; @@ -3771,13 +3819,20 @@ static void sd_shutdown(struct device *dev) sd_sync_cache(sdkp, NULL); } - if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { + if (system_state != SYSTEM_RESTART && + sdkp->device->manage_system_start_stop) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_start_stop_device(sdkp, 0); } } -static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) +static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime) +{ + return (sdev->manage_system_start_stop && !runtime) || + (sdev->manage_runtime_start_stop && runtime); +} + +static int sd_suspend_common(struct device *dev, bool runtime) { struct scsi_disk *sdkp = dev_get_drvdata(dev); struct scsi_sense_hdr sshdr; @@ -3809,15 +3864,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) } } - if (sdkp->device->manage_start_stop) { + if (sd_do_start_stop(sdkp->device, runtime)) { if (!sdkp->device->silence_suspend) sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); /* an error is not worth aborting a system sleep */ ret = sd_start_stop_device(sdkp, 0); - if (ignore_stop_errors) + if (!runtime) ret = 0; } + if (!ret) + sdkp->suspended = true; + return ret; } @@ -3826,15 +3884,15 @@ static int sd_suspend_system(struct device *dev) if (pm_runtime_suspended(dev)) return 0; - return sd_suspend_common(dev, true); + return sd_suspend_common(dev, false); } static int sd_suspend_runtime(struct device *dev) { - return sd_suspend_common(dev, false); + return sd_suspend_common(dev, true); } -static int sd_resume(struct device *dev) +static int sd_resume(struct device *dev, bool runtime) { struct scsi_disk *sdkp = dev_get_drvdata(dev); int ret = 0; @@ -3842,16 +3900,21 @@ static int sd_resume(struct device *dev) if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; - if (!sdkp->device->manage_start_stop) + if (!sd_do_start_stop(sdkp->device, runtime)) { + sdkp->suspended = false; return 0; + } if (!sdkp->device->no_start_on_resume) { sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ret = sd_start_stop_device(sdkp, 1); } - if (!ret) + if (!ret) { opal_unlock_from_suspend(sdkp->opal_dev); + sdkp->suspended = false; + } + return ret; } @@ -3860,7 +3923,7 @@ static int sd_resume_system(struct device *dev) if (pm_runtime_suspended(dev)) return 0; - return sd_resume(dev); + return sd_resume(dev, false); } static int sd_resume_runtime(struct device *dev) @@ -3887,7 +3950,7 @@ static int sd_resume_runtime(struct device *dev) "Failed to clear sense data\n"); } - return sd_resume(dev); + return sd_resume(dev, true); } static const struct dev_pm_ops sd_pm_ops = { diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 5eea762f84d1..409dda5350d1 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -131,6 +131,7 @@ struct scsi_disk { u8 provisioning_mode; u8 zeroing_mode; u8 nr_actuators; /* Number of actuators */ + bool suspended; /* Disk is suspended 
(stopped) */ unsigned ATO : 1; /* state of disk ATO bit */ unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 1dcd243df567..ec87d9d878f3 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -100,6 +100,7 @@ static void __init imx8mm_soc_uid(void) { void __iomem *ocotp_base; struct device_node *np; + struct clk *clk; u32 offset = of_machine_is_compatible("fsl,imx8mp") ? IMX8MP_OCOTP_UID_OFFSET : 0; @@ -109,11 +110,20 @@ static void __init imx8mm_soc_uid(void) ocotp_base = of_iomap(np, 0); WARN_ON(!ocotp_base); + clk = of_clk_get_by_name(np, NULL); + if (IS_ERR(clk)) { + WARN_ON(IS_ERR(clk)); + return; + } + + clk_prepare_enable(clk); soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); soc_uid <<= 32; soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); + clk_disable_unprepare(clk); + clk_put(clk); iounmap(ocotp_base); of_node_put(np); } diff --git a/drivers/soc/loongson/Kconfig b/drivers/soc/loongson/Kconfig index 314e13bb3e01..368344943a93 100644 --- a/drivers/soc/loongson/Kconfig +++ b/drivers/soc/loongson/Kconfig @@ -20,6 +20,7 @@ config LOONGSON2_GUTS config LOONGSON2_PM bool "Loongson-2 SoC Power Management Controller Driver" depends on LOONGARCH && OF + depends on INPUT=y help The Loongson-2's power management controller was ACPI, supports ACPI S2Idle (Suspend To Idle), ACPI S3 (Suspend To RAM), ACPI S4 (Suspend To diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c index bace4bc8e03b..9a469779eea7 100644 --- a/drivers/soc/loongson/loongson2_guts.c +++ b/drivers/soc/loongson/loongson2_guts.c @@ -70,7 +70,7 @@ static const struct loongson2_soc_die_attr *loongson2_soc_die_match( if (matches->svr == (svr & matches->mask)) return matches; matches++; - }; + } return NULL; } @@ -94,7 +94,6 @@ static int loongson2_guts_probe(struct platform_device *pdev) { struct device_node *root, *np = pdev->dev.of_node; struct device *dev = &pdev->dev; - struct resource *res; const struct loongson2_soc_die_attr *soc_die; const char *machine; u32 svr; @@ -106,8 +105,7 @@ static int loongson2_guts_probe(struct platform_device *pdev) guts->little_endian = of_property_read_bool(np, "little-endian"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - guts->regs = ioremap(res->start, res->end - res->start + 1); + guts->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(guts->regs)) return PTR_ERR(guts->regs); diff --git a/drivers/soc/loongson/loongson2_pm.c b/drivers/soc/loongson/loongson2_pm.c index 796add6e8b63..b8e5e1e3528a 100644 --- a/drivers/soc/loongson/loongson2_pm.c +++ b/drivers/soc/loongson/loongson2_pm.c @@ -11,6 +11,7 @@ #include <linux/input.h> #include <linux/suspend.h> #include <linux/interrupt.h> +#include <linux/of_platform.h> #include <linux/pm_wakeirq.h> #include <linux/platform_device.h> #include <asm/bootinfo.h> @@ -192,12 +193,16 @@ static int loongson2_pm_probe(struct platform_device *pdev) if (loongson_sysconf.suspend_addr) suspend_set_ops(&loongson2_suspend_ops); + /* Populate children */ + retval = devm_of_platform_populate(dev); + if (retval) + dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n"); + return 0; } static const struct of_device_id loongson2_pm_match[] = { { .compatible = "loongson,ls2k0500-pmc", }, - { .compatible = "loongson,ls2k1000-pmc", }, {}, }; diff --git a/drivers/spi/spi-cs42l43.c 
b/drivers/spi/spi-cs42l43.c index 453a9b37ce78..d239fc5a49cc 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -256,7 +256,6 @@ static int cs42l43_spi_probe(struct platform_device *pdev) ret = devm_spi_register_controller(priv->dev, priv->ctlr); if (ret) { - pm_runtime_disable(priv->dev); dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret); } diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c index fd2fac236bbd..3aff5a166c94 100644 --- a/drivers/spi/spi-gxp.c +++ b/drivers/spi/spi-gxp.c @@ -194,7 +194,7 @@ static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op return ret; } - return write_len; + return 0; } static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig index 5d8917160d41..75c985da75b5 100644 --- a/drivers/staging/media/atomisp/Kconfig +++ b/drivers/staging/media/atomisp/Kconfig @@ -12,12 +12,12 @@ menuconfig INTEL_ATOMISP config VIDEO_ATOMISP tristate "Intel Atom Image Signal Processor Driver" depends on VIDEO_DEV && INTEL_ATOMISP + depends on IPU_BRIDGE depends on MEDIA_PCI_SUPPORT depends on PMIC_OPREGION depends on I2C select V4L2_FWNODE select IOSF_MBI - select IPU_BRIDGE select VIDEOBUF2_VMALLOC select VIDEO_V4L2_SUBDEV_API help diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c index e98b3010520e..94171e62dee9 100644 --- a/drivers/staging/media/tegra-video/vi.c +++ b/drivers/staging/media/tegra-video/vi.c @@ -1455,17 +1455,18 @@ static int __maybe_unused vi_runtime_suspend(struct device *dev) } /* - * Graph Management + * Find the entity matching a given fwnode in an v4l2_async_notifier list */ static struct tegra_vi_graph_entity * -tegra_vi_graph_find_entity(struct tegra_vi_channel *chan, +tegra_vi_graph_find_entity(struct list_head *list, const struct fwnode_handle *fwnode) { struct tegra_vi_graph_entity *entity; struct v4l2_async_connection *asd; - list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) { + list_for_each_entry(asd, list, asc_entry) { entity = to_tegra_vi_graph_entity(asd); + if (entity->asd.match.fwnode == fwnode) return entity; } @@ -1532,7 +1533,8 @@ static int tegra_vi_graph_build(struct tegra_vi_channel *chan, } /* find the remote entity from notifier list */ - ent = tegra_vi_graph_find_entity(chan, link.remote_node); + ent = tegra_vi_graph_find_entity(&chan->notifier.done_list, + link.remote_node); if (!ent) { dev_err(vi->dev, "no entity found for %pOF\n", to_of_node(link.remote_node)); @@ -1664,7 +1666,8 @@ static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier, * Locate the entity corresponding to the bound subdev and store the * subdev pointer. 
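
Note: the new manage_system_start_stop and manage_runtime_start_stop attributes in the sd.c hunks above follow standard sysfs boilerplate: sysfs_emit() on the show side, capable()/kstrtobool() validation on the store side, wired up with DEVICE_ATTR_RW(). A stripped-down sketch of that shape, backed by a plain bool and using invented "example_*" names:

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static bool example_flag;

static ssize_t example_flag_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", example_flag);
}

static ssize_t example_flag_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (kstrtobool(buf, &v))
		return -EINVAL;

	example_flag = v;
	return count;
}
static DEVICE_ATTR_RW(example_flag);

DEVICE_ATTR_RW() expects the _show/_store suffix convention used here; the attribute then only needs to be listed in the device's attribute group.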
*/ - entity = tegra_vi_graph_find_entity(chan, subdev->fwnode); + entity = tegra_vi_graph_find_entity(&chan->notifier.waiting_list, + subdev->fwnode); if (!entity) { dev_err(vi->dev, "no entity for subdev %s\n", subdev->name); return -EINVAL; @@ -1713,7 +1716,8 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan, /* skip entities that are already processed */ if (device_match_fwnode(vi->dev, remote) || - tegra_vi_graph_find_entity(chan, remote)) { + tegra_vi_graph_find_entity(&chan->notifier.waiting_list, + remote)) { fwnode_handle_put(remote); continue; } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b7ac60f4a219..b6523d4b9259 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -843,7 +843,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) EXPORT_SYMBOL(target_to_linux_sector); struct devices_idr_iter { - struct config_item *prev_item; int (*fn)(struct se_device *dev, void *data); void *data; }; @@ -853,11 +852,9 @@ static int target_devices_idr_iter(int id, void *p, void *data) { struct devices_idr_iter *iter = data; struct se_device *dev = p; + struct config_item *item; int ret; - config_item_put(iter->prev_item); - iter->prev_item = NULL; - /* * We add the device early to the idr, so it can be used * by backend modules during configuration. We do not want @@ -867,12 +864,13 @@ static int target_devices_idr_iter(int id, void *p, void *data) if (!target_dev_configured(dev)) return 0; - iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); - if (!iter->prev_item) + item = config_item_get_unless_zero(&dev->dev_group.cg_item); + if (!item) return 0; mutex_unlock(&device_mutex); ret = iter->fn(dev, iter->data); + config_item_put(item); mutex_lock(&device_mutex); return ret; @@ -895,7 +893,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data), mutex_lock(&device_mutex); ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); mutex_unlock(&device_mutex); - config_item_put(iter.prev_item); return ret; } diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 72685ee0d53f..6bb5cae09688 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -238,8 +238,6 @@ int optee_notif_send(struct optee *optee, u_int key); u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct tee_param *param); -int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); -int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); void optee_supp_init(struct optee_supp *supp); void optee_supp_uninit(struct optee_supp *supp); void optee_supp_release(struct optee_supp *supp); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 409cadcc1cff..754e11dcb240 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -47,8 +47,6 @@ struct tee_device { struct tee_shm_pool *pool; }; -int tee_shm_init(void); - int tee_shm_get_fd(struct tee_shm *shm); bool tee_device_get(struct tee_device *teedev); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index b3550ff9c494..1f3aba607cd5 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -3097,10 +3097,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) gsm->has_devices = false; } for (i = NUM_DLCI - 1; i >= 0; i--) - if (gsm->dlci[i]) { + if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); - gsm->dlci[i] = NULL; - } 
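
Note: the target_core_device iterator change above stops carrying a reference between iterations and instead brackets a single callback with get/put while device_mutex is dropped. The same shape expressed with a plain kref, purely as an illustration (generic names, not the configfs API used by the driver):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_obj {
	struct kref ref;
	/* ... payload ... */
};

static void example_release(struct kref *ref)
{
	kfree(container_of(ref, struct example_obj, ref));
}

/* Called for each object while 'lock' is held by the caller. */
static int example_iter_one(struct example_obj *obj, struct mutex *lock,
			    int (*fn)(struct example_obj *, void *), void *data)
{
	int ret;

	if (!kref_get_unless_zero(&obj->ref))
		return 0;		/* object already being torn down: skip it */

	mutex_unlock(lock);		/* fn() may sleep or take other locks */
	ret = fn(obj, data);
	kref_put(&obj->ref, example_release);
	mutex_lock(lock);

	return ret;
}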
mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fb891b67968f..141627370aab 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1936,7 +1936,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) skip_rx = true; if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { - if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) + struct irq_data *d; + + d = irq_get_irq_data(port->irq); + if (d && irqd_is_wakeup_set(d)) pm_wakeup_event(tport->tty->dev, 0); if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index e4490639d383..9d2738e10c0b 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -233,7 +233,8 @@ int parent_create_sysfs_files(struct mdev_parent *parent) out_err: while (--i >= 0) mdev_type_remove(parent->types[i]); - return 0; + kset_unregister(parent->mdev_types_kset); + return ret; } static ssize_t remove_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig index 407b3fd32733..6eceef7b028a 100644 --- a/drivers/vfio/pci/pds/Kconfig +++ b/drivers/vfio/pci/pds/Kconfig @@ -3,7 +3,7 @@ config PDS_VFIO_PCI tristate "VFIO support for PDS PCI devices" - depends on PDS_CORE + depends on PDS_CORE && PCI_IOV select VFIO_PCI_CORE help This provides generic PCI support for PDS devices using the VFIO diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c index b46174f5eb09..649b18ee394b 100644 --- a/drivers/vfio/pci/pds/vfio_dev.c +++ b/drivers/vfio/pci/pds/vfio_dev.c @@ -162,7 +162,7 @@ static int pds_vfio_init_device(struct vfio_device *vdev) pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn); dev_dbg(&pdev->dev, "%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n", - __func__, pci_dev_id(pdev->physfn), pci_id, vf_id, + __func__, pci_dev_id(pci_physfn(pdev)), pci_id, vf_id, pci_domain_nr(pdev->bus), pds_vfio); return 0; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 955d938eb663..7b8fd977f71c 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, done += partlen; len -= partlen; ptr += partlen; + iov->consumed += partlen; + iov->iov[iov->i].iov_len -= partlen; + iov->iov[iov->i].iov_base += partlen; - vringh_kiov_advance(iov, partlen); + if (!iov->iov[iov->i].iov_len) { + /* Fix up old iov element then increment. 
*/ + iov->iov[iov->i].iov_len = iov->consumed; + iov->iov[iov->i].iov_base -= iov->consumed; + + iov->consumed = 0; + iov->i++; + } } return done; } diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 0bb86e6c4d0a..1b2136fe0fa5 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> +#include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> #include <linux/atomic.h> @@ -96,6 +97,7 @@ enum xen_irq_type { struct irq_info { struct list_head list; struct list_head eoi_list; + struct rcu_work rwork; short refcnt; u8 spurious_cnt; u8 is_accounted; @@ -147,22 +149,12 @@ const struct evtchn_ops *evtchn_ops; static DEFINE_MUTEX(irq_mapping_update_lock); /* - * Lock protecting event handling loop against removing event channels. - * Adding of event channels is no issue as the associated IRQ becomes active - * only after everything is setup (before request_[threaded_]irq() the handler - * can't be entered for an event, as the event channel will be unmasked only - * then). - */ -static DEFINE_RWLOCK(evtchn_rwlock); - -/* * Lock hierarchy: * * irq_mapping_update_lock - * evtchn_rwlock - * IRQ-desc lock - * percpu eoi_list_lock - * irq_info->lock + * IRQ-desc lock + * percpu eoi_list_lock + * irq_info->lock */ static LIST_HEAD(xen_irq_list_head); @@ -306,6 +298,22 @@ static void channels_on_cpu_inc(struct irq_info *info) info->is_accounted = 1; } +static void delayed_free_irq(struct work_struct *work) +{ + struct irq_info *info = container_of(to_rcu_work(work), struct irq_info, + rwork); + unsigned int irq = info->irq; + + /* Remove the info pointer only now, with no potential users left. */ + set_info_for_irq(irq, NULL); + + kfree(info); + + /* Legacy IRQ descriptors are managed by the arch. */ + if (irq >= nr_legacy_irqs()) + irq_free_desc(irq); +} + /* Constructors for packed IRQ information. 
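
Note: the Xen events rework in this and the following hunks replaces evtchn_rwlock with RCU: readers run under rcu_read_lock() and the irq_info is freed from an rcu_work, which the workqueue only executes after a full grace period. A generic sketch of that publish/unpublish/deferred-free shape (illustrative names, not the Xen data structures):

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_info {
	struct rcu_work rwork;
	int value;
};

static struct example_info __rcu *example_slot;

static void example_free_work(struct work_struct *work)
{
	struct example_info *info =
		container_of(to_rcu_work(work), struct example_info, rwork);

	kfree(info);	/* safe: all rcu_read_lock() readers are gone */
}

static void example_reader(void)
{
	struct example_info *info;

	rcu_read_lock();
	info = rcu_dereference(example_slot);
	if (info)
		pr_debug("value %d\n", info->value);
	rcu_read_unlock();
}

static void example_remove(struct example_info *info)
{
	rcu_assign_pointer(example_slot, NULL);		/* unpublish first */
	INIT_RCU_WORK(&info->rwork, example_free_work);
	queue_rcu_work(system_wq, &info->rwork);	/* free after a grace period */
}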
*/ static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq, @@ -668,33 +676,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work) eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); - read_lock_irqsave(&evtchn_rwlock, flags); + rcu_read_lock(); while (true) { - spin_lock(&eoi->eoi_list_lock); + spin_lock_irqsave(&eoi->eoi_list_lock, flags); info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, eoi_list); - if (info == NULL || now < info->eoi_time) { - spin_unlock(&eoi->eoi_list_lock); + if (info == NULL) + break; + + if (now < info->eoi_time) { + mod_delayed_work_on(info->eoi_cpu, system_wq, + &eoi->delayed, + info->eoi_time - now); break; } list_del_init(&info->eoi_list); - spin_unlock(&eoi->eoi_list_lock); + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); info->eoi_time = 0; xen_irq_lateeoi_locked(info, false); } - if (info) - mod_delayed_work_on(info->eoi_cpu, system_wq, - &eoi->delayed, info->eoi_time - now); + spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); - read_unlock_irqrestore(&evtchn_rwlock, flags); + rcu_read_unlock(); } static void xen_cpu_init_eoi(unsigned int cpu) @@ -709,16 +720,15 @@ static void xen_cpu_init_eoi(unsigned int cpu) void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) { struct irq_info *info; - unsigned long flags; - read_lock_irqsave(&evtchn_rwlock, flags); + rcu_read_lock(); info = info_for_irq(irq); if (info) xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS); - read_unlock_irqrestore(&evtchn_rwlock, flags); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(xen_irq_lateeoi); @@ -732,6 +742,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; + INIT_RCU_WORK(&info->rwork, delayed_free_irq); set_info_for_irq(irq, info); /* @@ -789,31 +800,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); - unsigned long flags; if (WARN_ON(!info)) return; - write_lock_irqsave(&evtchn_rwlock, flags); - if (!list_empty(&info->eoi_list)) lateeoi_list_del(info); list_del(&info->list); - set_info_for_irq(irq, NULL); - WARN_ON(info->refcnt > 0); - write_unlock_irqrestore(&evtchn_rwlock, flags); - - kfree(info); - - /* Legacy IRQ descriptors are managed by the arch. */ - if (irq < nr_legacy_irqs()) - return; - - irq_free_desc(irq); + queue_rcu_work(system_wq, &info->rwork); } /* Not called for lateeoi events. */ @@ -1711,7 +1709,14 @@ int xen_evtchn_do_upcall(void) int cpu = smp_processor_id(); struct evtchn_loop_ctrl ctrl = { 0 }; - read_lock(&evtchn_rwlock); + /* + * When closing an event channel the associated IRQ must not be freed + * until all cpus have left the event handling loop. This is ensured + * by taking the rcu_read_lock() while handling events, as freeing of + * the IRQ is handled via queue_rcu_work() _after_ closing the event + * channel. + */ + rcu_read_lock(); do { vcpu_info->evtchn_upcall_pending = 0; @@ -1724,7 +1729,7 @@ int xen_evtchn_do_upcall(void) } while (vcpu_info->evtchn_upcall_pending); - read_unlock(&evtchn_rwlock); + rcu_read_unlock(); /* * Increment irq_epoch only now to defer EOIs only for |
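
Note: the reworked lateeoi worker above pops due entries under the per-cpu list spinlock, re-arms the delayed work for the first entry that is not yet due, and runs under rcu_read_lock() instead of the removed rwlock. A simplified sketch of that loop shape (generic names and a simplified list item, not the Xen irq_info):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_item {
	struct list_head list;
	unsigned long due;	/* jiffies value at which the entry becomes eligible */
};

static void example_worker(spinlock_t *lock, struct list_head *head,
			   struct delayed_work *dwork, unsigned long now)
{
	struct example_item *item;
	unsigned long flags;

	rcu_read_lock();	/* items are freed via RCU, see the previous sketch */

	for (;;) {
		spin_lock_irqsave(lock, flags);

		item = list_first_entry_or_null(head, struct example_item, list);
		if (!item)
			break;

		if (time_before(now, item->due)) {
			/* Head not due yet: re-arm for it and stop. */
			mod_delayed_work(system_wq, dwork, item->due - now);
			break;
		}

		list_del_init(&item->list);
		spin_unlock_irqrestore(lock, flags);

		/* Handle 'item' here without the list lock held. */
	}
	spin_unlock_irqrestore(lock, flags);	/* both break paths leave it held */

	rcu_read_unlock();
}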
