| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-06-03 09:47:16 +0900 |
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-06-03 09:47:16 +0900 |
| commit | b132e4a25dccf5d5857e6ce2d96541be51aa9c5e | (patch) |
| tree | 64a6474bc7c218ff228469fda4600556217939b8 | /drivers |
| parent | b3addcf0d1f04f53fcc302577d5a5e964c18531a | (diff) |
| parent | afbbc7913a288c29616bd31ae612548f6475151a | (diff) |
Merge tag 'fixes-for-v4.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-linus
Felipe writes:
usb: fixes for v4.12-rc4
A fix for a long-standing synchronization bug in the mass storage gadget.
Support for Meson8 SoCs in dwc2.
Synchronization fixes in the Renesas USB driver.
Diffstat (limited to 'drivers')
112 files changed, 1704 insertions, 888 deletions
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index b7c2a06963d6..25aba9b107dd 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -57,6 +57,7 @@ #define ACPI_BUTTON_LID_INIT_IGNORE 0x00 #define ACPI_BUTTON_LID_INIT_OPEN 0x01 +#define ACPI_BUTTON_LID_INIT_METHOD 0x02 #define _COMPONENT ACPI_BUTTON_COMPONENT ACPI_MODULE_NAME("button"); @@ -376,6 +377,9 @@ static void acpi_lid_initialize_state(struct acpi_device *device) case ACPI_BUTTON_LID_INIT_OPEN: (void)acpi_lid_notify_state(device, 1); break; + case ACPI_BUTTON_LID_INIT_METHOD: + (void)acpi_lid_update_state(device); + break; case ACPI_BUTTON_LID_INIT_IGNORE: default: break; @@ -560,6 +564,9 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp) if (!strncmp(val, "open", sizeof("open") - 1)) { lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; pr_info("Notify initial lid state as open\n"); + } else if (!strncmp(val, "method", sizeof("method") - 1)) { + lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; + pr_info("Notify initial lid state with _LID return value\n"); } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; pr_info("Do not notify initial lid state\n"); @@ -573,6 +580,8 @@ static int param_get_lid_init_state(char *buffer, struct kernel_param *kp) switch (lid_init_state) { case ACPI_BUTTON_LID_INIT_OPEN: return sprintf(buffer, "open"); + case ACPI_BUTTON_LID_INIT_METHOD: + return sprintf(buffer, "method"); case ACPI_BUTTON_LID_INIT_IGNORE: return sprintf(buffer, "ignore"); default: diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index 3ba1c3472cf9..fd86bec98dea 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, struct nfit_spa *nfit_spa; /* We only care about memory errors */ - if (!(mce->status & MCACOD)) + if (!mce_is_memory_error(mce)) return NOTIFY_DONE; /* diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index f62082fdd670..9c36b27996fc 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -512,13 +512,12 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws) /** * wakup_source_activate - Mark given wakeup source as active. * @ws: Wakeup source to handle. - * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM * core of the event by incrementing the counter of of wakeup events being * processed. */ -static void wakeup_source_activate(struct wakeup_source *ws, bool hard) +static void wakeup_source_activate(struct wakeup_source *ws) { unsigned int cec; @@ -526,9 +525,6 @@ static void wakeup_source_activate(struct wakeup_source *ws, bool hard) "unregistered wakeup source\n")) return; - if (hard) - pm_system_wakeup(); - ws->active = true; ws->active_count++; ws->last_time = ktime_get(); @@ -554,7 +550,10 @@ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) ws->wakeup_count++; if (!ws->active) - wakeup_source_activate(ws, hard); + wakeup_source_activate(ws); + + if (hard) + pm_system_wakeup(); } /** diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 74ed7e9a7f27..2011fec2d6ad 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -71,6 +71,15 @@ config ARM_HIGHBANK_CPUFREQ If in doubt, say N. 
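The acpi/button.c hunk above adds a third lid_init_state mode, "method", alongside "open" and "ignore". Below is a small userspace sketch (a hypothetical test harness, not kernel code) of the resulting parser; it mirrors the kernel's `strncmp(val, lit, sizeof(lit) - 1)` prefix-matching idiom, which also explains why a string such as "methods" is accepted:

```c
/* Userspace sketch of the three-way lid_init_state parser the
 * button.c hunk ends up with.  Note that sizeof("open") - 1 compares
 * only the literal's length, so any string *beginning* with "open"
 * matches -- the kernel code shares that property. */
#include <stdio.h>
#include <string.h>

enum { LID_INIT_OPEN, LID_INIT_METHOD, LID_INIT_IGNORE, LID_INIT_BAD };

static int parse_lid_init_state(const char *val)
{
	if (!strncmp(val, "open", sizeof("open") - 1))
		return LID_INIT_OPEN;
	if (!strncmp(val, "method", sizeof("method") - 1))
		return LID_INIT_METHOD;
	if (!strncmp(val, "ignore", sizeof("ignore") - 1))
		return LID_INIT_IGNORE;
	return LID_INIT_BAD;
}

int main(void)
{
	const char *tests[] = { "open", "method", "ignore", "methods", "closed" };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%-8s -> %d\n", tests[i], parse_lid_init_state(tests[i]));
	return 0;
}
```

The same prefix idiom is used unchanged in both pre-existing branches of param_set_lid_init_state(); the patch only adds the "method" case.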
+config ARM_DB8500_CPUFREQ + tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500 + default ARCH_U8500 + depends on HAS_IOMEM + depends on !CPU_THERMAL || THERMAL + help + This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC + series. + config ARM_IMX6Q_CPUFREQ tristate "Freescale i.MX6 cpufreq support" depends on ARCH_MXC diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b7e78f063c4f..ab3a42cd29ef 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -53,7 +53,7 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o -obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o +obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index ab3a951a17e6..ef1fafdad400 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -53,6 +53,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", &record->type, &part, &cnt, &time, &data_type) == 5) { record->id = generic_id(time, part, cnt); + record->part = part; record->count = cnt; record->time.tv_sec = time; record->time.tv_nsec = 0; @@ -64,6 +65,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, } else if (sscanf(name, "dump-type%u-%u-%d-%lu", &record->type, &part, &cnt, &time) == 4) { record->id = generic_id(time, part, cnt); + record->part = part; record->count = cnt; record->time.tv_sec = time; record->time.tv_nsec = 0; @@ -77,6 +79,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, * multiple logs, remains. */ record->id = generic_id(time, part, 0); + record->part = part; record->count = 0; record->time.tv_sec = time; record->time.tv_nsec = 0; @@ -241,9 +244,15 @@ static int efi_pstore_write(struct pstore_record *record) efi_guid_t vendor = LINUX_EFI_CRASH_GUID; int i, ret = 0; + record->time.tv_sec = get_seconds(); + record->time.tv_nsec = 0; + + record->id = generic_id(record->time.tv_sec, record->part, + record->count); + snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c", record->type, record->part, record->count, - get_seconds(), record->compressed ? 'C' : 'D'); + record->time.tv_sec, record->compressed ? 'C' : 'D'); for (i = 0; i < DUMP_NAME_LEN; i++) efi_name[i] = name[i]; @@ -255,7 +264,6 @@ static int efi_pstore_write(struct pstore_record *record) if (record->reason == KMSG_DUMP_OOPS) efivar_run_worker(); - record->id = record->part; return ret; }; @@ -287,7 +295,7 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) * holding multiple logs, remains. 
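The efi-pstore hunks here thread record->part through every branch of efi_pstore_read_func() and compute the packed id once at write time. The sketch below assumes generic_id() packs the fields as ((time * 100) + part) * 1000 + count, which is consistent with the do_div() decode that the erase hunk just below removes; it shows the round trip and why carrying part in the record itself is more robust than re-deriving it from the id:

```c
/* Round-trip of the assumed efi-pstore id packing.  The old erase
 * path recovered 'part' with the division sequence shown here, which
 * breaks silently once part >= 100 or count >= 1000; storing
 * record->part at read/write time avoids the decode entirely. */
#include <stdio.h>
#include <stdint.h>

static uint64_t generic_id(unsigned long time, unsigned int part, int count)
{
	return ((uint64_t)time * 100 + part) * 1000 + count;
}

int main(void)
{
	unsigned long time = 1496447236;
	unsigned int part = 7;
	int count = 42;

	uint64_t id = generic_id(time, part, count);

	/* what the removed do_div() pair in efi_pstore_erase() did: */
	uint64_t tmp = id / 1000;          /* strip count */
	unsigned int decoded = tmp % 100;  /* recover part */

	printf("id=%llu decoded part=%u (expected %u)\n",
	       (unsigned long long)id, decoded, part);
	return 0;
}
```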
*/ snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu", - ed->record->type, (unsigned int)ed->record->id, + ed->record->type, ed->record->part, ed->record->time.tv_sec); for (i = 0; i < DUMP_NAME_LEN; i++) @@ -320,10 +328,7 @@ static int efi_pstore_erase(struct pstore_record *record) char name[DUMP_NAME_LEN]; efi_char16_t efi_name[DUMP_NAME_LEN]; int found, i; - unsigned int part; - do_div(record->id, 1000); - part = do_div(record->id, 100); snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu", record->type, record->part, record->count, record->time.tv_sec); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 236d9950221b..c0d8c6ff6380 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) { - struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; + struct amdgpu_fbdev *afbdev; struct drm_fb_helper *fb_helper; int ret; + if (!adev) + return; + + afbdev = adev->mode_info.rfbdev; + if (!afbdev) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 07ff3b1514f1..8ecf82c5fe74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) mutex_unlock(&id_mgr->lock); } - if (gds_switch_needed) { + if (ring->funcs->emit_gds_switch && gds_switch_needed) { id->gds_base = job->gds_base; id->gds_size = job->gds_size; id->gws_base = job->gws_base; @@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; + atomic64_set(&id->owner, 0); id->gds_base = 0; id->gds_size = 0; id->gws_base = 0; @@ -681,6 +682,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, } /** + * amdgpu_vm_reset_all_id - reset VMID to zero + * + * @adev: amdgpu device structure + * + * Reset VMID to force flush on next use + */ +void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev) +{ + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { + struct amdgpu_vm_id_manager *id_mgr = + &adev->vm_manager.id_mgr[i]; + + for (j = 1; j < id_mgr->num_ids; ++j) + amdgpu_vm_reset_id(adev, i, j); + } +} + +/** * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo * * @vm: requested vm @@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) adev->vm_manager.seqno[i] = 0; - atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic64_set(&adev->vm_manager.client_counter, 0); spin_lock_init(&adev->vm_manager.prt_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index d97e28b4bdc4..e1d951ece433 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, unsigned vmid); +void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, diff --git 
a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 6dc1410b380f..ec93714e4524 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (amdgpu_dpm_get_vrefresh(adev) > 120) + return true; + if (vblank_time < switch_limit) return true; else diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index a572979f186c..d860939152df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->vm_manager.enabled) { - gmc_v6_0_vm_fini(adev); - adev->vm_manager.enabled = false; - } gmc_v6_0_hw_fini(adev); return 0; @@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle) if (r) return r; - if (!adev->vm_manager.enabled) { - r = gmc_v6_0_vm_init(adev); - if (r) { - dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); - return r; - } - adev->vm_manager.enabled = true; - } + amdgpu_vm_reset_all_ids(adev); - return r; + return 0; } static bool gmc_v6_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index a9083a16a250..2750e5c23813 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->vm_manager.enabled) { - gmc_v7_0_vm_fini(adev); - adev->vm_manager.enabled = false; - } gmc_v7_0_hw_fini(adev); return 0; @@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle) if (r) return r; - if (!adev->vm_manager.enabled) { - r = gmc_v7_0_vm_init(adev); - if (r) { - dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); - return r; - } - adev->vm_manager.enabled = true; - } + amdgpu_vm_reset_all_ids(adev); - return r; + return 0; } static bool gmc_v7_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 4ac99784160a..f56b4089ee9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->vm_manager.enabled) { - gmc_v8_0_vm_fini(adev); - adev->vm_manager.enabled = false; - } gmc_v8_0_hw_fini(adev); return 0; @@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle) if (r) return r; - if (!adev->vm_manager.enabled) { - r = gmc_v8_0_vm_init(adev); - if (r) { - dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); - return r; - } - adev->vm_manager.enabled = true; - } + amdgpu_vm_reset_all_ids(adev); - return r; + return 0; } static bool gmc_v8_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index dc1e1c1d6b24..f936332a069d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; - if (adev->vm_manager.enabled) { - gmc_v9_0_vm_fini(adev); - adev->vm_manager.enabled = false; - } gmc_v9_0_hw_fini(adev); return 0; @@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle) if (r) return r; - if (!adev->vm_manager.enabled) { - r = gmc_v9_0_vm_init(adev); - if (r) { - dev_err(adev->dev, - "vm manager initialization failed (%d).\n", r); - return r; - } - adev->vm_manager.enabled = true; - } + amdgpu_vm_reset_all_ids(adev); - return r; + return 0; } static bool gmc_v9_0_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index a74a3db3056c..102eb6d029fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) return sizeof(struct smu7_power_state); } +static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, + uint32_t vblank_time_us) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t switch_limit_us; + + switch (hwmgr->chip_id) { + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + switch_limit_us = data->is_memory_gddr5 ? 190 : 150; + break; + default: + switch_limit_us = data->is_memory_gddr5 ? 450 : 150; + break; + } + + if (vblank_time_us < switch_limit_us) + return true; + else + return false; +} static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *request_ps, @@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, bool disable_mclk_switching; bool disable_mclk_switching_for_frame_lock; struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info = {0}; const struct phm_clock_and_voltage_limits *max_limits; uint32_t i; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, int32_t count; int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + info.mode_info = &mode_info; data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); @@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cgs_get_active_displays_info(hwmgr->device, &info); - /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; @@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - disable_mclk_switching = (1 < info.display_count) || - disable_mclk_switching_for_frame_lock; + disable_mclk_switching = ((1 < info.display_count) || + disable_mclk_switching_for_frame_lock || + smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || + (mode_info.refresh_rate > 120)); sclk = smu7_ps->performance_levels[0].engine_clock; mclk = smu7_ps->performance_levels[0].memory_clock; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ad30f5d3a10d..2614af2f553f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); - uint32_t i; + int i; if 
(hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) return -EINVAL; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index fedd4d60d9cd..5dc8c4350602 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -948,8 +948,6 @@ retry: } out: - if (ret && crtc->funcs->page_flip_target) - drm_crtc_vblank_put(crtc); if (fb) drm_framebuffer_put(fb); if (crtc->primary->old_fb) @@ -964,5 +962,8 @@ out: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); + if (ret && crtc->funcs->page_flip_target) + drm_crtc_vblank_put(crtc); + return ret; } diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 0066fe7e622e..be3eefec5152 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev, if (scan->type & DRM_MODE_TYPE_PREFERRED) { mode_dev->panel_fixed_mode = drm_mode_duplicate(dev, scan); + DRM_DEBUG_KMS("Using mode from DDC\n"); goto out; /* FIXME: check for quirks */ } } /* Failed to get EDID, what about VBT? do we need this? */ - if (mode_dev->vbt_mode) + if (dev_priv->lfp_lvds_vbt_mode) { mode_dev->panel_fixed_mode = - drm_mode_duplicate(dev, mode_dev->vbt_mode); + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (!mode_dev->panel_fixed_mode) - if (dev_priv->lfp_lvds_vbt_mode) - mode_dev->panel_fixed_mode = - drm_mode_duplicate(dev, - dev_priv->lfp_lvds_vbt_mode); + if (mode_dev->panel_fixed_mode) { + mode_dev->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + DRM_DEBUG_KMS("Using mode from VBT\n"); + goto out; + } + } /* * If we didn't get EDID, try checking if the panel is already turned @@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev, if (mode_dev->panel_fixed_mode) { mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + DRM_DEBUG_KMS("Using pre-programmed mode\n"); goto out; /* FIXME: check for quirks */ } } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 058340a002c2..4a340efd8ba6 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, if (ret) return; - cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); - if (fb != old_state->fb) { obj = to_qxl_framebuffer(fb)->obj; user_bo = gem_to_qxl_bo(obj); @@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, qxl_bo_kunmap(cursor_bo); qxl_bo_kunmap(user_bo); + cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); cmd->u.set.visible = 1; cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); @@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, if (ret) goto out_free_release; + cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 7ba450832e6b..ea36dc4dd5d2 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) u32 vblank_time = r600_dpm_get_vblank_time(rdev); u32 switch_limit = pi->mem_gddr5 ? 
450 : 300; + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (r600_dpm_get_vrefresh(rdev) > 120) + return true; + if (vblank_time < switch_limit) return true; else diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ccebe0f8d2e1..008c145b7f29 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } @@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_RX_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f130ec41ee4b..0bf103536404 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } @@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_RX_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0a085176e79b..e06e2d8feab3 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index e3e7cb1d10a2..4761f27f2ca2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if ((radeon_runtime_pm != 0) && radeon_has_atpx() && ((flags & RADEON_IS_IGP) == 0) && - !pci_is_thunderbolt_attached(rdev->pdev)) + !pci_is_thunderbolt_attached(dev->pdev)) flags |= RADEON_IS_PX; /* radeon_device_init should report only fatal error diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ceee87f029d9..76d1888528e6 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } @@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev) 
WREG32(DC_HPD5_INT_CONTROL, tmp); } if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { - tmp = RREG32(DC_HPD5_INT_CONTROL); + tmp = RREG32(DC_HPD6_INT_CONTROL); tmp |= DC_HPDx_RX_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 6283b99d2b17..d1263b82d646 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -94,9 +94,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], static int dw_i2c_acpi_configure(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); + u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; acpi_handle handle = ACPI_HANDLE(&pdev->dev); const struct acpi_device_id *id; - u32 ss_ht, fp_ht, hs_ht, fs_ht; struct acpi_device *adev; const char *uid; diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 0ed77eeff31e..a2e3dd715380 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; + void *dmadata = kmalloc(len, GFP_KERNEL); + int ret; + + if (!dmadata) + return -ENOMEM; /* do control transfer */ - return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), + ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | - USB_DIR_IN, value, index, data, len, 2000); + USB_DIR_IN, value, index, dmadata, len, 2000); + + memcpy(data, dmadata, len); + kfree(dmadata); + return ret; } static int usb_write(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len) { struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; + void *dmadata = kmemdup(data, len, GFP_KERNEL); + int ret; + + if (!dmadata) + return -ENOMEM; /* do control transfer */ - return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), + ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, - value, index, data, len, 2000); + value, index, dmadata, len, 2000); + + kfree(dmadata); + return ret; } static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index a679e56c44cd..f431da07f861 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, struct completion *completion) { struct device *dev = &client->dev; - long ret; int error; int len; - u8 buffer[ETP_I2C_INF_LENGTH]; + u8 buffer[ETP_I2C_REPORT_LEN]; + + len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); + if (len != ETP_I2C_REPORT_LEN) { + error = len < 0 ? 
len : -EIO; + dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", + error, len); + } reinit_completion(completion); enable_irq(client->irq); error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); - if (!error) - ret = wait_for_completion_interruptible_timeout(completion, - msecs_to_jiffies(300)); - disable_irq(client->irq); - if (error) { dev_err(dev, "device reset failed: %d\n", error); - return error; - } else if (ret == 0) { + } else if (!wait_for_completion_timeout(completion, + msecs_to_jiffies(300))) { dev_err(dev, "timeout waiting for device reset\n"); - return -ETIMEDOUT; - } else if (ret < 0) { - error = ret; - dev_err(dev, "error waiting for device reset: %d\n", error); - return error; + error = -ETIMEDOUT; } + disable_irq(client->irq); + + if (error) + return error; + len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); if (len != ETP_I2C_INF_LENGTH) { error = len < 0 ? len : -EIO; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 2302aef2b2d4..dd042a9b0aaa 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type) case MXT_TOUCH_KEYARRAY_T15: case MXT_TOUCH_PROXIMITY_T23: case MXT_TOUCH_PROXKEY_T52: + case MXT_TOUCH_MULTITOUCHSCREEN_T100: case MXT_PROCI_GRIPFACE_T20: case MXT_PROCG_NOISE_T22: case MXT_PROCI_ONETOUCH_T24: diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 8cf8d8d5d4ef..f872817e81e4 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, M09_REGISTER_OFFSET, 0, 31); static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, - M09_REGISTER_THRESHOLD, 20, 80); + M09_REGISTER_THRESHOLD, 0, 80); static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, NO_REGISTER, 3, 14); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 78a7ce816a47..9a873118ea5f 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client, "slave address 0x%02x\n", client->name, chip->bits, client->addr); - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; if (pdata) { diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index 1304160de168..13ef162cf066 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -27,6 +27,7 @@ struct mmc_pwrseq_simple { struct mmc_pwrseq pwrseq; bool clk_enabled; u32 post_power_on_delay_ms; + u32 power_off_delay_us; struct clk *ext_clk; struct gpio_descs *reset_gpios; }; @@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host) mmc_pwrseq_simple_set_gpios_value(pwrseq, 1); + if (pwrseq->power_off_delay_us) + usleep_range(pwrseq->power_off_delay_us, + 2 * pwrseq->power_off_delay_us); + if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) { clk_disable_unprepare(pwrseq->ext_clk); pwrseq->clk_enabled = false; @@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev) device_property_read_u32(dev, "post-power-on-delay-ms", &pwrseq->post_power_on_delay_ms); + device_property_read_u32(dev, "power-off-delay-us", + 
&pwrseq->power_off_delay_us); pwrseq->pwrseq.dev = dev; pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c index 772d0900026d..951d2cdd7888 100644 --- a/drivers/mmc/host/cavium-octeon.c +++ b/drivers/mmc/host/cavium-octeon.c @@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host) static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val) { writeq(val, host->base + MIO_EMM_INT(host)); - if (!host->dma_active || (host->dma_active && !host->has_ciu3)) + if (!host->has_ciu3) writeq(val, host->base + MIO_EMM_INT_EN(host)); } @@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev) } host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev, - "power-gpios", + "power", GPIOD_OUT_HIGH); if (IS_ERR(host->global_pwr_gpiod)) { dev_err(&pdev->dev, "Invalid power GPIO\n"); @@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Error populating slots\n"); octeon_mmc_set_shared_power(host, 0); - return ret; + goto error; } i++; } return 0; + +error: + for (i = 0; i < CAVIUM_MAX_MMC; i++) { + if (host->slot[i]) + cvm_mmc_of_slot_remove(host->slot[i]); + if (host->slot_pdev[i]) + of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + } + return ret; } static int octeon_mmc_remove(struct platform_device *pdev) diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index fe3d77267cd6..b9cc95998799 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev, return 0; error: + for (i = 0; i < CAVIUM_MAX_MMC; i++) { + if (host->slot[i]) + cvm_mmc_of_slot_remove(host->slot[i]); + if (host->slot_pdev[i]) + of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + } clk_disable_unprepare(host->clk); return ret; } diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index 58b51ba6aabd..b8aaf0fdb77c 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c @@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) cvm_mmc_reset_bus(slot); if (host->global_pwr_gpiod) host->set_shared_power(host, 0); - else + else if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); break; case MMC_POWER_UP: if (host->global_pwr_gpiod) host->set_shared_power(host, 1); - else + else if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); break; } @@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot) return -EINVAL; } - mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); - if (IS_ERR(mmc->supply.vmmc)) { - if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; - /* - * Legacy Octeon firmware has no regulator entry, fall-back to - * a hard-coded voltage to get a sane OCR. - */ + ret = mmc_regulator_get_supply(mmc); + if (ret == -EPROBE_DEFER) + return ret; + /* + * Legacy Octeon firmware has no regulator entry, fall-back to + * a hard-coded voltage to get a sane OCR. 
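Both Cavium MMC probe hunks above replace a bare `return ret` with a goto-style unwind that tears down every slot created before the failure. A minimal userspace sketch of that pattern, with init_slot()/destroy_slot() as hypothetical stand-ins for the driver's cvm_mmc_of_slot_remove() and of_platform_device_destroy() calls:

```c
/* Unwind-on-error loop: if creating slot i fails, destroy everything
 * that was successfully created before returning the error. */
#include <stdio.h>

#define MAX_SLOTS 4

static int init_slot(int i)     { return i == 2 ? -1 : 0; } /* slot 2 fails */
static void destroy_slot(int i) { printf("destroy slot %d\n", i); }

int main(void)
{
	int created[MAX_SLOTS] = { 0 };
	int i, ret = 0;

	for (i = 0; i < MAX_SLOTS; i++) {
		ret = init_slot(i);
		if (ret) {
			printf("slot %d failed, unwinding\n", i);
			goto error;
		}
		created[i] = 1;
	}
	return 0;

error:
	/* tear down only what actually exists, as the driver's loop
	 * does with its host->slot[]/slot_pdev[] NULL checks */
	for (i = 0; i < MAX_SLOTS; i++)
		if (created[i])
			destroy_slot(i);
	return ret;
}
```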
+ */ + if (IS_ERR(mmc->supply.vmmc)) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - } else { - ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); - if (ret > 0) - mmc->ocr_avail = ret; - } /* Common MMC bindings */ ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 3275d4995812..61666d269771 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = { }; static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, .ops = &sdhci_iproc_ops, }; diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index 6356781f1cca..f7e26b031e76 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios) return ret; } -void xenon_clean_phy(struct sdhci_host *host) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); - - kfree(priv->phy_params); -} - static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, const char *phy_name) { @@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, if (ret) return ret; - ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); - if (ret) - xenon_clean_phy(host); - - return ret; + return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); } int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 67246655315b..bc1781bb070b 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev) err = xenon_sdhc_prepare(host); if (err) - goto clean_phy_param; + goto err_clk; err = sdhci_add_host(host); if (err) @@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev) remove_sdhc: xenon_sdhc_unprepare(host); -clean_phy_param: - xenon_clean_phy(host); err_clk: clk_disable_unprepare(pltfm_host->clk); free_pltfm: @@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - xenon_clean_phy(host); - sdhci_remove_host(host, 0); xenon_sdhc_unprepare(host); diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 6e6523ea01ce..73debb42dc2f 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h @@ -93,7 +93,6 @@ struct xenon_priv { }; int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); -void xenon_clean_phy(struct sdhci_host *host); int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host); void xenon_soc_pad_ctrl(struct sdhci_host *host, diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index c5fd4259da33..b44a6aeb346d 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2577,7 +2577,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, return -1; ad_info->aggregator_id = aggregator->aggregator_identifier; - ad_info->ports = aggregator->num_of_ports; + ad_info->ports = __agg_active_ports(aggregator); ad_info->actor_key 
= aggregator->actor_oper_aggregator_key; ad_info->partner_key = aggregator->partner_oper_aggregator_key; ether_addr_copy(ad_info->partner_system, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2be78807fd6e..2359478b977f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) bond_for_each_slave_rcu(bond, slave, iter) { unsigned long trans_start = dev_trans_start(slave->dev); + slave->new_link = BOND_LINK_NOCHANGE; + if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, trans_start, 1) && bond_time_in_interval(bond, slave->last_rx, 1)) { - slave->link = BOND_LINK_UP; + slave->new_link = BOND_LINK_UP; slave_state_changed = 1; /* primary_slave has no meaning in round-robin @@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) if (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, slave->last_rx, 2)) { - slave->link = BOND_LINK_DOWN; + slave->new_link = BOND_LINK_DOWN; slave_state_changed = 1; if (slave->link_failure_count < UINT_MAX) @@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) if (!rtnl_trylock()) goto re_arm; + bond_for_each_slave(bond, slave, iter) { + if (slave->new_link != BOND_LINK_NOCHANGE) + slave->link = slave->new_link; + } + if (slave_state_changed) { bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) @@ -4271,10 +4278,10 @@ static int bond_check_params(struct bond_params *params) int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; struct bond_opt_value newval; const struct bond_opt_value *valptr; - int arp_all_targets_value; + int arp_all_targets_value = 0; u16 ad_actor_sys_prio = 0; u16 ad_user_port_key = 0; - __be32 arp_target[BOND_MAX_ARP_TARGETS]; + __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 }; int arp_ip_count; int bond_mode = BOND_MODE_ROUNDROBIN; int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; @@ -4501,7 +4508,6 @@ static int bond_check_params(struct bond_params *params) arp_validate_value = 0; } - arp_all_targets_value = 0; if (arp_all_targets) { bond_opt_initstr(&newval, arp_all_targets); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS), diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index b0a3b85fc6f8..db02bc2fb4b2 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev) ret = ax_mii_init(dev); if (ret) - goto out_irq; + goto err_out; ax_NS8390_init(dev, 0); ret = register_netdev(dev); if (ret) - goto out_irq; + goto err_out; netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", ei_local->word16 ? 
16 : 8, dev->irq, dev->base_addr, @@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev) return 0; - out_irq: - /* cleanup irq */ - free_irq(dev->irq, dev); err_out: return ret; } diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 63f2deec2a52..77a1c03255de 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1353,6 +1353,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); + err = -EIO; goto err_dma; } @@ -1366,10 +1367,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * pcibios_set_master to do the needed arch specific settings */ pci_set_master(pdev); - err = -ENOMEM; netdev = alloc_etherdev(sizeof(struct atl2_adapter)); - if (!netdev) + if (!netdev) { + err = -ENOMEM; goto err_alloc_etherdev; + } SET_NETDEV_DEV(netdev, &pdev->dev); @@ -1408,8 +1410,6 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - err = -EIO; - netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f3a09ab55900..4eee18ce9be4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb, struct be_adapter *adapter = netdev_priv(dev); u8 l4_hdr = 0; - /* The code below restricts offload features for some tunneled packets. + /* The code below restricts offload features for some tunneled and + * Q-in-Q packets. * Offload features for normal (non tunnel) packets are unchanged. 
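The bond_main.c hunks above stop the ARP monitor from flipping slave->link directly: the monitor runs without RTNL, so it now stages the proposed state in slave->new_link and commits it only once rtnl_trylock() succeeds. A stripped-down model of that stage-then-commit pattern (types and locking elided; the lock is only implied by where commit() runs):

```c
/* Stage link-state decisions in new_link, commit them later "under
 * RTNL".  Mirrors the BOND_LINK_NOCHANGE reset at the top of each
 * monitor pass in the hunk above. */
#include <stdio.h>

enum link { LINK_NOCHANGE = -1, LINK_DOWN = 0, LINK_UP = 1 };

struct slave {
	enum link link;
	enum link new_link;
};

static void monitor(struct slave *s, int saw_traffic)
{
	s->new_link = LINK_NOCHANGE;          /* reset each pass */
	if (s->link != LINK_UP && saw_traffic)
		s->new_link = LINK_UP;        /* propose, don't commit */
	else if (s->link == LINK_UP && !saw_traffic)
		s->new_link = LINK_DOWN;
}

static void commit(struct slave *s)           /* runs with RTNL held */
{
	if (s->new_link != LINK_NOCHANGE)
		s->link = s->new_link;
}

int main(void)
{
	struct slave s = { .link = LINK_DOWN, .new_link = LINK_NOCHANGE };

	monitor(&s, 1);                        /* traffic seen: proposes UP */
	printf("before commit: link=%d new_link=%d\n", s.link, s.new_link);
	commit(&s);
	printf("after  commit: link=%d\n", s.link);
	return 0;
}
```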
*/ + features = vlan_features_check(skb, features); if (!skb->encapsulation || !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) return features; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 56a563f90b0b..f7c8649fd28f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; bool active_high = false; - int msec = 1; + int msec = 1, phy_post_delay = 0; struct device_node *np = pdev->dev.of_node; if (!np) @@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev) else if (!gpio_is_valid(phy_reset)) return 0; + err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); + /* valid reset duration should be less than 1s */ + if (!err && phy_post_delay > 1000) + return -EINVAL; + active_high = of_property_read_bool(np, "phy-reset-active-high"); err = devm_gpio_request_one(&pdev->dev, phy_reset, @@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev) gpio_set_value_cansleep(phy_reset, !active_high); + if (!phy_post_delay) + return 0; + + if (phy_post_delay > 20) + msleep(phy_post_delay); + else + usleep_range(phy_post_delay * 1000, + phy_post_delay * 1000 + 1000); + return 0; } #else /* CONFIG_OF */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5bdaf3d545b2..10d282841f5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work) mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } static void cmd_work_handler(struct work_struct *work) @@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work) if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + u8 status = 0; + u32 drv_synd; + + ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); + MLX5_SET(mbox_out, ent->out, status, status); + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); + + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + return; + } + /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work) poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); } } @@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) wait_for_completion(&ent->done); } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ent->ret = -ETIMEDOUT; - mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + mlx5_cmd_comp_handler(dev, 1UL << 
ent->idx, true); } err = ent->ret; @@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) } } -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; @@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) struct semaphore *sem; ent = cmd->ent_arr[i]; + + /* if we already completed the command, ignore it */ + if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, + &ent->state)) { + /* only real completion can free the cmd slot */ + if (!forced) { + mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", + ent->idx); + free_ent(cmd, ent->idx); + } + continue; + } + if (ent->callback) cancel_delayed_work(&ent->cb_timeout_work); if (ent->page_queue) @@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", ent->ret, deliv_status_to_str(ent->status), ent->status); } - free_ent(cmd, ent->idx); + + /* only real completion will free the entry slot */ + if (!forced) + free_ent(cmd, ent->idx); if (ent->callback) { ds = ent->ts2 - ent->ts1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 7b1566f0ae58..66b5fec15313 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) #define MLX5_IB_GRH_BYTES 40 #define MLX5_IPOIB_ENCAP_LEN 4 #define MLX5_GID_SIZE 16 +#define MLX5_IPOIB_PSEUDO_LEN 20 +#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN) static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, @@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct sk_buff *skb) { struct net_device *netdev = rq->netdev; + char *pseudo_header; u8 *dgid; u8 g; @@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, if (likely(netdev->features & NETIF_F_RXHASH)) mlx5e_skb_set_hash(cqe, skb); + /* 20 bytes of ipoib header and 4 for encap existing */ + pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN); + memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN); skb_reset_mac_header(skb); - skb_pull(skb, MLX5_IPOIB_ENCAP_LEN); + skb_pull(skb, MLX5_IPOIB_HARD_LEN); skb->dev = netdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 11c27e4fadf6..ec63158ab643 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -43,6 +43,7 @@ #include <net/tc_act/tc_vlan.h> #include <net/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_pedit.h> +#include <net/tc_act/tc_csum.h> #include <net/vxlan.h> #include <net/arp.h> #include "en.h" @@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, if (e->flags & MLX5_ENCAP_ENTRY_VALID) mlx5_encap_dealloc(priv->mdev, e->encap_id); - hlist_del_rcu(&e->encap_hlist); + hash_del_rcu(&e->encap_hlist); kfree(e->encap_header); kfree(e); } @@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, struct mlx5e_tc_flow_parse_attr *parse_attr) { struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; - int i, action_size, nactions, max_actions, first, last; + int i, 
action_size, nactions, max_actions, first, last, first_z; void *s_masks_p, *a_masks_p, *vals_p; - u32 s_mask, a_mask, val; struct mlx5_fields *f; u8 cmd, field_bsize; + u32 s_mask, a_mask; unsigned long mask; void *action; @@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks, for (i = 0; i < ARRAY_SIZE(fields); i++) { f = &fields[i]; /* avoid seeing bits set from previous iterations */ - s_mask = a_mask = mask = val = 0; + s_mask = 0; + a_mask = 0; s_masks_p = (void *)set_masks + f->offset; a_masks_p = (void *)add_masks + f->offset; @@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks, memset(a_masks_p, 0, f->size); } - memcpy(&val, vals_p, f->size); - field_bsize = f->size * BITS_PER_BYTE; + + first_z = find_first_zero_bit(&mask, field_bsize); first = find_first_bit(&mask, field_bsize); last = find_last_bit(&mask, field_bsize); - if (first > 0 || last != (field_bsize - 1)) { + if (first > 0 || last != (field_bsize - 1) || first_z < last) { printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", mask); return -EOPNOTSUPP; @@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, } if (field_bsize == 32) - MLX5_SET(set_action_in, action, data, ntohl(val)); + MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p)); else if (field_bsize == 16) - MLX5_SET(set_action_in, action, data, ntohs(val)); + MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p)); else if (field_bsize == 8) - MLX5_SET(set_action_in, action, data, val); + MLX5_SET(set_action_in, action, data, *(u8 *)vals_p); action += action_size; nactions++; @@ -1109,6 +1111,28 @@ out_err: return err; } +static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags) +{ + u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | + TCA_CSUM_UPDATE_FLAG_UDP; + + /* The HW recalcs checksums only if re-writing headers */ + if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { + netdev_warn(priv->netdev, + "TC csum action is only offloaded with pedit\n"); + return false; + } + + if (update_flags & ~prot_flags) { + netdev_warn(priv->netdev, + "can't offload TC csum action for some header/s - flags %#x\n", + update_flags); + return false; + } + + return true; +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) @@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, continue; } + if (is_tcf_csum(a)) { + if (csum_offload_supported(priv, attr->action, + tcf_csum_update_flags(a))) + continue; + + return -EOPNOTSUPP; + } + if (is_tcf_skbedit_mark(a)) { u32 mark = tcf_skbedit_mark(a); @@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, continue; } + if (is_tcf_csum(a)) { + if (csum_offload_supported(priv, attr->action, + tcf_csum_update_flags(a))) + continue; + + return -EOPNOTSUPP; + } + if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev, *encap_dev = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ea5d8d37a75c..33eae5ad2fb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) break; case MLX5_EVENT_TYPE_CMD: - 
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); break; case MLX5_EVENT_TYPE_PORT_CHANGE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d0515391d33b..44f59b1d6f0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev) spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); - mlx5_cmd_comp_handler(dev, vector); + mlx5_cmd_comp_handler(dev, vector, true); return; no_trig: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0c123d571b4c..fe5546bb4153 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -612,7 +612,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) struct mlx5_priv *priv = &mdev->priv; struct msix_entry *msix = priv->msix_arr; int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; - int err; if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); @@ -622,18 +621,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), priv->irq_info[i].mask); - err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); - if (err) { - mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", - irq); - goto err_clear_mask; - } +#ifdef CONFIG_SMP + if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); +#endif return 0; - -err_clear_mask: - free_cpumask_var(priv->irq_info[i].mask); - return err; } static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index dec5d563ab19..959fd12d2e67 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; - if (ip_tunnel_info_af(info) == AF_INET) { + if (rtnl_dereference(geneve->sock4)) { if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, info->key.u.ipv4.dst)) goto nla_put_failure; @@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) !!(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; + } + #if IS_ENABLED(CONFIG_IPV6) - } else { + if (rtnl_dereference(geneve->sock6)) { if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, &info->key.u.ipv6.dst)) goto nla_put_failure; @@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, !geneve->use_udp6_rx_checksums)) goto nla_put_failure; -#endif } +#endif if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 4fea1b3dfbb4..7b652bb7ebe4 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) /* Check if there's an existing gtpX device to configure */ dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); - if (dev->netdev_ops == 
&gtp_netdev_ops) + if (dev && dev->netdev_ops == &gtp_netdev_ops) gtp = netdev_priv(dev); put_net(net); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 60ffc9da6a28..c360dd6ead22 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -108,7 +108,7 @@ config MDIO_MOXART config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" depends on 64BIT - depends on HAS_IOMEM + depends on HAS_IOMEM && OF_MDIO select MDIO_CAVIUM help This module provides a driver for the Octeon and ThunderX MDIO diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 272b051a0199..9097e42bec2e 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev) { int err; - /* The Marvell PHY has an errata which requires - * that certain registers get written in order - * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - - if (err < 0) - return err; - - err = phy_write(phydev, 0x1d, 0x1f); - if (err < 0) - return err; - - err = phy_write(phydev, 0x1e, 0x200c); - if (err < 0) - return err; - - err = phy_write(phydev, 0x1d, 0x5); - if (err < 0) - return err; - - err = phy_write(phydev, 0x1e, 0); - if (err < 0) - return err; - - err = phy_write(phydev, 0x1e, 0x100); - if (err < 0) - return err; - err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; @@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev) return 0; } +static int m88e1101_config_aneg(struct phy_device *phydev) +{ + int err; + + /* This Marvell PHY has an errata which requires + * that certain registers get written in order + * to restart autonegotiation + */ + err = phy_write(phydev, MII_BMCR, BMCR_RESET); + + if (err < 0) + return err; + + err = phy_write(phydev, 0x1d, 0x1f); + if (err < 0) + return err; + + err = phy_write(phydev, 0x1e, 0x200c); + if (err < 0) + return err; + + err = phy_write(phydev, 0x1d, 0x5); + if (err < 0) + return err; + + err = phy_write(phydev, 0x1e, 0); + if (err < 0) + return err; + + err = phy_write(phydev, 0x1e, 0x100); + if (err < 0) + return err; + + return marvell_config_aneg(phydev); +} + static int m88e1111_config_aneg(struct phy_device *phydev) { int err; @@ -1892,7 +1900,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &marvell_config_init, - .config_aneg = &marvell_config_aneg, + .config_aneg = &m88e1101_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index f3ae88fdf332..8ab281b478f2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -310,6 +310,26 @@ skip: return -ENODEV; } + return 0; + +bad_desc: + dev_info(&dev->udev->dev, "bad CDC descriptors\n"); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); + + +/* like usbnet_generic_cdc_bind() but handles filter initialization + * correctly + */ +int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int rv; + + rv = usbnet_generic_cdc_bind(dev, intf); + if (rv < 0) + goto bail_out; + /* Some devices don't initialise properly. In particular * the packet filter is not reset. There are devices that * don't do reset all the way. 
So the packet filter should @@ -317,13 +337,10 @@ skip: */ usbnet_cdc_update_filter(dev); - return 0; - -bad_desc: - dev_info(&dev->udev->dev, "bad CDC descriptors\n"); - return -ENODEV; +bail_out: + return rv; } -EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); +EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind); void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) { @@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct cdc_state))); - status = usbnet_generic_cdc_bind(dev, intf); + status = usbnet_ether_cdc_bind(dev, intf); if (status < 0) return status; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 765400b62168..2dfca96a63b6 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -681,7 +681,7 @@ static int smsc95xx_set_features(struct net_device *netdev, if (ret < 0) return ret; - if (features & NETIF_F_HW_CSUM) + if (features & NETIF_F_IP_CSUM) read_buf |= Tx_COE_EN_; else read_buf &= ~Tx_COE_EN_; @@ -1279,12 +1279,19 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) spin_lock_init(&pdata->mac_cr_lock); + /* LAN95xx devices do not alter the computed checksum of 0 to 0xffff. + * RFC 2460, ipv6 UDP calculated checksum yields a result of zero must + * be changed to 0xffff. RFC 768, ipv4 UDP computed checksum is zero, + * it is transmitted as all ones. The zero transmitted checksum means + * transmitter generated no checksum. Hence, enable csum offload only + * for ipv4 packets. + */ if (DEFAULT_TX_CSUM_ENABLE) - dev->net->features |= NETIF_F_HW_CSUM; + dev->net->features |= NETIF_F_IP_CSUM; if (DEFAULT_RX_CSUM_ENABLE) dev->net->features |= NETIF_F_RXCSUM; - dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; smsc95xx_init_mac_address(dev); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9320d96a1632..3e9246cc49c3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = { .ndo_poll_controller = virtnet_netpoll, #endif .ndo_xdp = virtnet_xdp, + .ndo_features_check = passthru_features_check, }; static void virtnet_config_changed_work(struct work_struct *work) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d5e0906262ea..a60926410438 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) } #ifdef CONFIG_BLK_DEV_INTEGRITY +static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id, + u16 bs) +{ + struct nvme_ns *ns = disk->private_data; + u16 old_ms = ns->ms; + u8 pi_type = 0; + + ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); + ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); + + /* PI implementation requires metadata equal t10 pi tuple size */ + if (ns->ms == sizeof(struct t10_pi_tuple)) + pi_type = id->dps & NVME_NS_DPS_PI_MASK; + + if (blk_get_integrity(disk) && + (ns->pi_type != pi_type || ns->ms != old_ms || + bs != queue_logical_block_size(disk->queue) || + (ns->ms && ns->ext))) + blk_integrity_unregister(disk); + + ns->pi_type = pi_type; +} + static void nvme_init_integrity(struct nvme_ns *ns) { struct blk_integrity integrity; @@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns) blk_queue_max_integrity_segments(ns->queue, 1); } #else +static 
void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id, + u16 bs) +{ +} static void nvme_init_integrity(struct nvme_ns *ns) { } @@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id) static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) { struct nvme_ns *ns = disk->private_data; - u8 lbaf, pi_type; - u16 old_ms; - unsigned short bs; - - old_ms = ns->ms; - lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; - ns->lba_shift = id->lbaf[lbaf].ds; - ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); - ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); + u16 bs; /* * If identify namespace failed, use default 512 byte block size so * block layer can use before failing read/write for 0 capacity. */ + ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; if (ns->lba_shift == 0) ns->lba_shift = 9; bs = 1 << ns->lba_shift; - /* XXX: PI implementation requires metadata equal t10 pi tuple size */ - pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? - id->dps & NVME_NS_DPS_PI_MASK : 0; blk_mq_freeze_queue(disk->queue); - if (blk_get_integrity(disk) && (ns->pi_type != pi_type || - ns->ms != old_ms || - bs != queue_logical_block_size(disk->queue) || - (ns->ms && ns->ext))) - blk_integrity_unregister(disk); - ns->pi_type = pi_type; + if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) + nvme_prep_integrity(disk, id, bs); blk_queue_logical_block_size(ns->queue, bs); - if (ns->ms && !blk_get_integrity(disk) && !ns->ext) nvme_init_integrity(ns); if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) @@ -1605,7 +1617,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) } memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); - if (ctrl->ops->is_fabrics) { + if (ctrl->ops->flags & NVME_F_FABRICS) { ctrl->icdoff = le16_to_cpu(id->icdoff); ctrl->ioccsz = le32_to_cpu(id->ioccsz); ctrl->iorcsz = le32_to_cpu(id->iorcsz); @@ -2098,7 +2110,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) if (ns->ndev) nvme_nvm_unregister_sysfs(ns); del_gendisk(ns->disk); - blk_mq_abort_requeue_list(ns->queue); blk_cleanup_queue(ns->queue); } @@ -2436,8 +2447,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) continue; revalidate_disk(ns->disk); blk_set_queue_dying(ns->queue); - blk_mq_abort_requeue_list(ns->queue); - blk_mq_start_stopped_hw_queues(ns->queue, true); + + /* + * Forcibly start all queues to avoid having stuck requests. + * Note that we must ensure the queues are not stopped + * when the final removal happens. 
+ */ + blk_mq_start_hw_queues(ns->queue); + + /* draining requests in requeue list */ + blk_mq_kick_requeue_list(ns->queue); } mutex_unlock(&ctrl->namespaces_mutex); } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index dca7165fabcf..5b14cbefb724 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -45,8 +45,6 @@ enum nvme_fc_queue_flags { #define NVMEFC_QUEUE_DELAY 3 /* ms units */ -#define NVME_FC_MAX_CONNECT_ATTEMPTS 1 - struct nvme_fc_queue { struct nvme_fc_ctrl *ctrl; struct device *dev; @@ -165,8 +163,6 @@ struct nvme_fc_ctrl { struct work_struct delete_work; struct work_struct reset_work; struct delayed_work connect_work; - int reconnect_delay; - int connect_attempts; struct kref ref; u32 flags; @@ -1376,9 +1372,9 @@ done: complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); if (!complete_rq) { if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { - status = cpu_to_le16(NVME_SC_ABORT_REQ); + status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); if (blk_queue_dying(rq->q)) - status |= cpu_to_le16(NVME_SC_DNR); + status |= cpu_to_le16(NVME_SC_DNR << 1); } nvme_end_request(rq, status, result); } else @@ -1751,7 +1747,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: transport association error detected: %s\n", ctrl->cnum, errmsg); - dev_info(ctrl->ctrl.device, + dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: resetting controller\n", ctrl->cnum); /* stop the queues on error, cleanup is in reset thread */ @@ -2195,9 +2191,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) if (!opts->nr_io_queues) return 0; - dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", - opts->nr_io_queues); - nvme_fc_init_io_queues(ctrl); memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); @@ -2268,9 +2261,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) if (ctrl->queue_count == 1) return 0; - dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n", - opts->nr_io_queues); - nvme_fc_init_io_queues(ctrl); ret = blk_mq_reinit_tagset(&ctrl->tag_set); @@ -2306,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) int ret; bool changed; - ctrl->connect_attempts++; + ++ctrl->ctrl.opts->nr_reconnects; /* * Create the admin queue @@ -2403,9 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); - ctrl->connect_attempts = 0; - - kref_get(&ctrl->ctrl.kref); + ctrl->ctrl.opts->nr_reconnects = 0; if (ctrl->queue_count > 1) { nvme_start_queues(&ctrl->ctrl); @@ -2536,26 +2524,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work) /* * tear down the controller - * This will result in the last reference on the nvme ctrl to - * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback. - * From there, the transport will tear down it's logical queues and - * association. + * After the last reference on the nvme ctrl is removed, + * the transport nvme_fc_nvme_ctrl_freed() callback will be + * invoked. From there, the transport will tear down it's + * logical queues and association. 
*/ nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); } -static int -__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) +static bool +__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl) { if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) - return -EBUSY; + return true; if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) - return -EBUSY; + return true; - return 0; + return false; +} + +static int +__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) +{ + return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0; } /* @@ -2581,6 +2575,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl) } static void +nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) +{ + /* If we are resetting/deleting then do nothing */ + if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { + WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || + ctrl->ctrl.state == NVME_CTRL_LIVE); + return; + } + + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", + ctrl->cnum, status); + + if (nvmf_should_reconnect(&ctrl->ctrl)) { + dev_info(ctrl->ctrl.device, + "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", + ctrl->cnum, ctrl->ctrl.opts->reconnect_delay); + queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, + ctrl->ctrl.opts->reconnect_delay * HZ); + } else { + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: Max reconnect attempts (%d) " + "reached. Removing controller\n", + ctrl->cnum, ctrl->ctrl.opts->nr_reconnects); + WARN_ON(__nvme_fc_schedule_delete_work(ctrl)); + } +} + +static void nvme_fc_reset_ctrl_work(struct work_struct *work) { struct nvme_fc_ctrl *ctrl = @@ -2591,34 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) nvme_fc_delete_association(ctrl); ret = nvme_fc_create_association(ctrl); - if (ret) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", - ctrl->cnum, ret); - if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Max reconnect attempts (%d) " - "reached. 
Removing controller\n", - ctrl->cnum, ctrl->connect_attempts); - - if (!nvme_change_ctrl_state(&ctrl->ctrl, - NVME_CTRL_DELETING)) { - dev_err(ctrl->ctrl.device, - "NVME-FC{%d}: failed to change state " - "to DELETING\n", ctrl->cnum); - return; - } - - WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work)); - return; - } - - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", - ctrl->cnum, ctrl->reconnect_delay); - queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, - ctrl->reconnect_delay * HZ); - } else + if (ret) + nvme_fc_reconnect_or_delete(ctrl, ret); + else dev_info(ctrl->ctrl.device, "NVME-FC{%d}: controller reset complete\n", ctrl->cnum); } @@ -2632,7 +2630,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); - dev_warn(ctrl->ctrl.device, + dev_info(ctrl->ctrl.device, "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) @@ -2649,7 +2647,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl) static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .name = "fc", .module = THIS_MODULE, - .is_fabrics = true, + .flags = NVME_F_FABRICS, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, @@ -2671,34 +2669,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work) struct nvme_fc_ctrl, connect_work); ret = nvme_fc_create_association(ctrl); - if (ret) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Reconnect attempt failed (%d)\n", - ctrl->cnum, ret); - if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Max reconnect attempts (%d) " - "reached. Removing controller\n", - ctrl->cnum, ctrl->connect_attempts); - - if (!nvme_change_ctrl_state(&ctrl->ctrl, - NVME_CTRL_DELETING)) { - dev_err(ctrl->ctrl.device, - "NVME-FC{%d}: failed to change state " - "to DELETING\n", ctrl->cnum); - return; - } - - WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work)); - return; - } - - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", - ctrl->cnum, ctrl->reconnect_delay); - queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, - ctrl->reconnect_delay * HZ); - } else + if (ret) + nvme_fc_reconnect_or_delete(ctrl, ret); + else dev_info(ctrl->ctrl.device, "NVME-FC{%d}: controller reconnect complete\n", ctrl->cnum); @@ -2755,7 +2728,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); - ctrl->reconnect_delay = opts->reconnect_delay; spin_lock_init(&ctrl->lock); /* io queue count */ @@ -2819,7 +2791,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, ctrl->ctrl.opts = NULL; /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); /* as we're past the point where we transition to the ref * counting teardown path, if we return a bad pointer here, @@ -2835,6 +2806,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, return ERR_PTR(ret); } + kref_get(&ctrl->ctrl.kref); + dev_info(ctrl->ctrl.device, "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", ctrl->cnum, ctrl->ctrl.opts->subsysnqn); @@ -2971,7 +2944,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) static struct nvmf_transport_ops nvme_fc_transport = { .name = "fc", .required_opts = 
NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, - .allowed_opts = NVMF_OPT_RECONNECT_DELAY, + .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, .create_ctrl = nvme_fc_create_ctrl, }; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 29c708ca9621..9d6a070d4391 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -208,7 +208,9 @@ struct nvme_ns { struct nvme_ctrl_ops { const char *name; struct module *module; - bool is_fabrics; + unsigned int flags; +#define NVME_F_FABRICS (1 << 0) +#define NVME_F_METADATA_SUPPORTED (1 << 1) int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4c2ff2bb26bc..d52701df7245 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev) c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { - dev_warn(dev->dev, "unable to set dbbuf\n"); + dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); /* Free memory and continue on */ nvme_dbbuf_dma_free(dev); } @@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, &pci_status); if (result == PCIBIOS_SUCCESSFUL) - dev_warn(dev->dev, + dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", csts, pci_status); else - dev_warn(dev->dev, + dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", csts, result); } @@ -1740,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev) */ if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { dev->q_depth = 2; - dev_warn(dev->dev, "detected Apple NVMe controller, set " - "queue depth=%u to work around controller resets\n", + dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " + "set queue depth=%u to work around controller resets\n", dev->q_depth); } @@ -1759,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) if (dev->cmbsz) { if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, &dev_attr_cmb.attr, NULL)) - dev_warn(dev->dev, + dev_warn(dev->ctrl.device, "failed to add sysfs attribute for CMB\n"); } } @@ -2047,6 +2047,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, + .flags = NVME_F_METADATA_SUPPORTED, .reg_read32 = nvme_pci_reg_read32, .reg_write32 = nvme_pci_reg_write32, .reg_read64 = nvme_pci_reg_read64, @@ -2293,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x0a54), .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, + { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index dd1c6deef82f..28bd255c144d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1038,6 +1038,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) nvme_rdma_wr_error(cq, wc, "SEND"); } +static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) 
+{ + int sig_limit; + + /* + * We signal completion every queue depth/2 and also handle the + * degenerated case of a device with queue_depth=1, where we + * would need to signal every message. + */ + sig_limit = max(queue->queue_size / 2, 1); + return (++queue->sig_count % sig_limit) == 0; +} + static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, struct ib_send_wr *first, bool flush) @@ -1065,9 +1078,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, * Would have been way to obvious to handle this in hardware or * at least the RDMA stack.. * - * This messy and racy code sniplet is copy and pasted from the iSER - * initiator, and the magic '32' comes from there as well. - * * Always signal the flushes. The magic request used for the flush * sequencer is not allocated in our driver's tagset and it's * triggered to be freed by blk_cleanup_queue(). So we need to @@ -1075,7 +1085,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, * embedded in request's payload, is not freed when __ib_process_cq() * calls wr_cqe->done(). */ - if ((++queue->sig_count % 32) == 0 || flush) + if (nvme_rdma_queue_sig_limit(queue) || flush) wr.send_flags |= IB_SEND_SIGNALED; if (first) @@ -1782,7 +1792,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl) static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .name = "rdma", .module = THIS_MODULE, - .is_fabrics = true, + .flags = NVME_F_FABRICS, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index feb497134aee..e503cfff0337 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl) static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { .name = "loop", .module = THIS_MODULE, - .is_fabrics = true, + .flags = NVME_F_FABRICS, .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 71fecc2debfc..703a42118ffc 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void) arch_initcall_sync(of_platform_default_populate_init); #endif -static int of_platform_device_destroy(struct device *dev, void *data) +int of_platform_device_destroy(struct device *dev, void *data) { /* Do not touch devices not populated from the device tree */ if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) @@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data) of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; } +EXPORT_SYMBOL_GPL(of_platform_device_destroy); /** * of_platform_depopulate() - Remove devices populated from device tree diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index a98cba55c7f0..19a289b8cc94 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c @@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) static int imx6q_pcie_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - return 0; + unsigned long pc = instruction_pointer(regs); + unsigned long instr = *(unsigned long *)pc; + int reg = (instr >> 12) & 15; + + /* + * If the instruction being executed was a read, + * make it look like it read all-ones. 
+ */ + if ((instr & 0x0c100000) == 0x04100000) { + unsigned long val; + + if (instr & 0x00400000) + val = 255; + else + val = -1; + + regs->uregs[reg] = val; + regs->ARM_pc += 4; + return 0; + } + + if ((instr & 0x0e100090) == 0x00100090) { + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + return 0; + } + + return 1; } static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) @@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void) * we can install the handler here without risking it * accessing some uninitialized driver state. */ - hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, - "imprecise external abort"); + hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, + "external abort on non-linefetch"); return platform_driver_register(&imx6_pcie_driver); } diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig index c23f146fb5a6..c09623ca8c3b 100644 --- a/drivers/pci/endpoint/Kconfig +++ b/drivers/pci/endpoint/Kconfig @@ -6,6 +6,7 @@ menu "PCI Endpoint" config PCI_ENDPOINT bool "PCI Endpoint Support" + depends on HAS_DMA help Enable this configuration option to support configurable PCI endpoint. This should be enabled if the platform has a PCI diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b01bd5bba8e6..563901cd9c06 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) if (!pm_runtime_suspended(dev) || pci_target_state(pci_dev) != pci_dev->current_state - || platform_pci_need_resume(pci_dev)) + || platform_pci_need_resume(pci_dev) + || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) return false; /* diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index cc6e085008fb..f6a63406c76e 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) cdev = &stdev->cdev; cdev_init(cdev, &switchtec_fops); cdev->owner = THIS_MODULE; - cdev->kobj.parent = &dev->kobj; return stdev; @@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev, stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; - stdev->partition = ioread8(&stdev->mmio_ntb->partition_id); + stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id); stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; + if (stdev->partition_count < 1) + stdev->partition_count = 1; + init_pff(stdev); pci_set_drvdata(pdev, stdev); @@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, SWITCHTEC_EVENT_EN_IRQ, &stdev->mmio_part_cfg->mrpc_comp_hdr); - rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1); - if (rc) - goto err_put; - - rc = device_add(&stdev->dev); + rc = cdev_device_add(&stdev->cdev, &stdev->dev); if (rc) goto err_devadd; @@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev, return 0; err_devadd: - cdev_del(&stdev->cdev); stdev_kill(stdev); err_put: ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); @@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); - device_del(&stdev->dev); - 
cdev_del(&stdev->cdev); + cdev_device_del(&stdev->cdev, &stdev->dev); ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); dev_info(&stdev->dev, "unregistered.\n"); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 14bde0db8c24..5b10b50f8686 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone( power_zone->id = result; idr_init(&power_zone->idr); + result = -ENOMEM; power_zone->name = kstrdup(name, GFP_KERNEL); if (!power_zone->name) goto err_name_alloc; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index b3de973a6260..9dca53df3584 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1088,7 +1088,7 @@ static u32 rtc_handler(void *context) } spin_unlock_irqrestore(&rtc_lock, flags); - pm_wakeup_event(dev, 0); + pm_wakeup_hard_event(dev); acpi_clear_event(ACPI_EVENT_RTC); acpi_disable_event(ACPI_EVENT_RTC, 0); return ACPI_INTERRUPT_HANDLED; diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 622bdabc8894..dab195f04da7 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) goto bye; } - mempool_free(mbp, hw->mb_mempool); if (finicsum != cfcsum) { csio_warn(hw, "Config File checksum mismatch: csum=%#x, computed=%#x\n", @@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) rv = csio_hw_validate_caps(hw, mbp); if (rv != 0) goto bye; + + mempool_free(mbp, hw->mb_mempool); + mbp = NULL; + /* * Note that we're operating with parameters * not supplied by the driver, rather than from hard-wired diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index b44c3136eb51..520325867e2b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, fp = fc_frame_alloc(lport, sizeof(*rtv)); if (!fp) { rjt_data.reason = ELS_RJT_UNAB; - rjt_data.reason = ELS_EXPL_INSUF_RES; + rjt_data.explan = ELS_EXPL_INSUF_RES; fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); goto drop; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6d7840b096e6..f2c0ba6ced78 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -141,6 +141,13 @@ struct lpfc_dmabuf { uint32_t buffer_tag; /* used for tagged queue ring */ }; +struct lpfc_nvmet_ctxbuf { + struct list_head list; + struct lpfc_nvmet_rcv_ctx *context; + struct lpfc_iocbq *iocbq; + struct lpfc_sglq *sglq; +}; + struct lpfc_dma_pool { struct lpfc_dmabuf *elements; uint32_t max_count; @@ -163,9 +170,7 @@ struct rqb_dmabuf { struct lpfc_dmabuf dbuf; uint16_t total_size; uint16_t bytes_recv; - void *context; - struct lpfc_iocbq *iocbq; - struct lpfc_sglq *sglq; + uint16_t idx; struct lpfc_queue *hrq; /* ptr to associated Header RQ */ struct lpfc_queue *drq; /* ptr to associated Data RQ */ }; @@ -670,6 +675,8 @@ struct lpfc_hba { /* INIT_LINK mailbox command */ #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ +#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ +#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ @@ -777,7 +784,6 @@ struct 
lpfc_hba { uint32_t cfg_nvme_oas; uint32_t cfg_nvme_io_channel; uint32_t cfg_nvmet_mrq; - uint32_t cfg_nvmet_mrq_post; uint32_t cfg_enable_nvmet; uint32_t cfg_nvme_enable_fb; uint32_t cfg_nvmet_fb_size; @@ -943,6 +949,7 @@ struct lpfc_hba { struct pci_pool *lpfc_mbuf_pool; struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ + struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ struct pci_pool *txrdy_payload_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; @@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) static inline struct lpfc_sli_ring * lpfc_phba_elsring(struct lpfc_hba *phba) { - if (phba->sli_rev == LPFC_SLI_REV4) - return phba->sli4_hba.els_wq->pring; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->sli4_hba.els_wq) + return phba->sli4_hba.els_wq->pring; + else + return NULL; + } return &phba->sli.sli3_ring[LPFC_ELS_RING]; } diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4830370bfab1..bb2d9e238225 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -60,9 +60,9 @@ #define LPFC_MIN_DEVLOSS_TMO 1 #define LPFC_MAX_DEVLOSS_TMO 255 -#define LPFC_DEF_MRQ_POST 256 -#define LPFC_MIN_MRQ_POST 32 -#define LPFC_MAX_MRQ_POST 512 +#define LPFC_DEF_MRQ_POST 512 +#define LPFC_MIN_MRQ_POST 512 +#define LPFC_MAX_MRQ_POST 2048 /* * Write key size should be multiple of 4. If write key is changed @@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&tgtp->xmt_ls_rsp_error)); len += snprintf(buf+len, PAGE_SIZE-len, - "FCP: Rcv %08x Drop %08x\n", + "FCP: Rcv %08x Release %08x Drop %08x\n", atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->xmt_fcp_release), atomic_read(&tgtp->rcv_fcp_cmd_drop)); if (atomic_read(&tgtp->rcv_fcp_cmd_in) != @@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, } len += snprintf(buf+len, PAGE_SIZE-len, - "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n", + "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " + "drop %08x\n", atomic_read(&tgtp->xmt_fcp_read), atomic_read(&tgtp->xmt_fcp_read_rsp), atomic_read(&tgtp->xmt_fcp_write), - atomic_read(&tgtp->xmt_fcp_rsp)); - - len += snprintf(buf+len, PAGE_SIZE-len, - "FCP Rsp: abort %08x drop %08x\n", - atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_rsp), atomic_read(&tgtp->xmt_fcp_drop)); len += snprintf(buf+len, PAGE_SIZE-len, @@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&tgtp->xmt_fcp_rsp_drop)); len += snprintf(buf+len, PAGE_SIZE-len, - "ABORT: Xmt %08x Err %08x Cmpl %08x", + "ABORT: Xmt %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_abort_cmpl)); + + len += snprintf(buf + len, PAGE_SIZE - len, + "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_sol), + atomic_read(&tgtp->xmt_abort_unsol), atomic_read(&tgtp->xmt_abort_rsp), - atomic_read(&tgtp->xmt_abort_rsp_error), - atomic_read(&tgtp->xmt_abort_cmpl)); + atomic_read(&tgtp->xmt_abort_rsp_error)); + + len += snprintf(buf + len, PAGE_SIZE - len, + "IO_CTX: %08x outstanding %08x total %x", + phba->sli4_hba.nvmet_ctx_cnt, + phba->sli4_hba.nvmet_io_wait_cnt, + phba->sli4_hba.nvmet_io_wait_total); len += snprintf(buf+len, PAGE_SIZE-len, "\n"); return len; @@ -3312,14 +3322,6 @@ LPFC_ATTR_R(nvmet_mrq, "Specify number of RQ pairs for 
processing NVMET cmds"); /* - * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ - * - */ -LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST, - LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST, - "Specify number of buffers to post on every MRQ"); - -/* * lpfc_enable_fc4_type: Defines what FC4 types are supported. * Supported Values: 1 - register just FCP * 3 - register both FCP and NVME @@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_suppress_rsp, &dev_attr_lpfc_nvme_io_channel, &dev_attr_lpfc_nvmet_mrq, - &dev_attr_lpfc_nvmet_mrq_post, &dev_attr_lpfc_nvme_enable_fb, &dev_attr_lpfc_nvmet_fb_size, &dev_attr_lpfc_enable_bg, @@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); - lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post); /* Initialize first burst. Target vs Initiator are different. */ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); @@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) /* Not NVME Target mode. Turn off Target parameters. */ phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; - phba->cfg_nvmet_mrq_post = 0; phba->cfg_nvmet_fb_size = 0; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 1c55408ac718..8912767e7bc8 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); void lpfc_retry_pport_discovery(struct lpfc_hba *); void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); +int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt); +void lpfc_free_iocb_list(struct lpfc_hba *phba); +int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, int count, int idx); void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); -void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, - struct lpfc_dmabuf *mp); +void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, + struct lpfc_nvmet_ctxbuf *ctxp); int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr); void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, uint16_t); int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); -int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq, - struct lpfc_queue *dq, int count); int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); void lpfc_unregister_fcf(struct lpfc_hba *); void lpfc_unregister_fcf_rescan(struct lpfc_hba *); @@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); int lpfc_mem_alloc(struct lpfc_hba *, int align); +int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba); int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); void lpfc_mem_free_all(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c 
index c7962dae4dab..f2cd19c6c2df 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -2092,6 +2092,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ + ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */ ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ size = FOURBYTES + 32; ad->AttrLen = cpu_to_be16(size); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index fce549a91911..4bcb92c844ca 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -798,21 +798,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) atomic_read(&tgtp->xmt_fcp_rsp)); len += snprintf(buf + len, size - len, - "FCP Rsp: abort %08x drop %08x\n", - atomic_read(&tgtp->xmt_fcp_abort), - atomic_read(&tgtp->xmt_fcp_drop)); - - len += snprintf(buf + len, size - len, "FCP Rsp Cmpl: %08x err %08x drop %08x\n", atomic_read(&tgtp->xmt_fcp_rsp_cmpl), atomic_read(&tgtp->xmt_fcp_rsp_error), atomic_read(&tgtp->xmt_fcp_rsp_drop)); len += snprintf(buf + len, size - len, - "ABORT: Xmt %08x Err %08x Cmpl %08x", + "ABORT: Xmt %08x Cmpl %08x\n", + atomic_read(&tgtp->xmt_fcp_abort), + atomic_read(&tgtp->xmt_fcp_abort_cmpl)); + + len += snprintf(buf + len, size - len, + "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", + atomic_read(&tgtp->xmt_abort_sol), + atomic_read(&tgtp->xmt_abort_unsol), atomic_read(&tgtp->xmt_abort_rsp), - atomic_read(&tgtp->xmt_abort_rsp_error), - atomic_read(&tgtp->xmt_abort_cmpl)); + atomic_read(&tgtp->xmt_abort_rsp_error)); len += snprintf(buf + len, size - len, "\n"); @@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) } spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); } + + len += snprintf(buf + len, size - len, + "IO_CTX: %08x outstanding %08x total %08x\n", + phba->sli4_hba.nvmet_ctx_cnt, + phba->sli4_hba.nvmet_io_wait_cnt, + phba->sli4_hba.nvmet_io_wait_total); } else { if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return len; @@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, atomic_set(&tgtp->rcv_ls_req_out, 0); atomic_set(&tgtp->rcv_ls_req_drop, 0); atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); atomic_set(&tgtp->xmt_ls_rsp, 0); atomic_set(&tgtp->xmt_ls_drop, 0); atomic_set(&tgtp->xmt_ls_rsp_error, 0); @@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, atomic_set(&tgtp->rcv_fcp_cmd_in, 0); atomic_set(&tgtp->rcv_fcp_cmd_out, 0); atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); - atomic_set(&tgtp->xmt_fcp_abort, 0); atomic_set(&tgtp->xmt_fcp_drop, 0); atomic_set(&tgtp->xmt_fcp_read_rsp, 0); atomic_set(&tgtp->xmt_fcp_read, 0); atomic_set(&tgtp->xmt_fcp_write, 0); atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_release, 0); atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); atomic_set(&tgtp->xmt_fcp_rsp_error, 0); atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); + atomic_set(&tgtp->xmt_abort_sol, 0); + atomic_set(&tgtp->xmt_abort_unsol, 0); atomic_set(&tgtp->xmt_abort_rsp, 0); atomic_set(&tgtp->xmt_abort_rsp_error, 0); - atomic_set(&tgtp->xmt_abort_cmpl, 0); } return nbytes; } @@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, qp->assoc_qid, qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, 
LPFC_QUE_INFO_GET_BUF_SIZE - len, - "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", + "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, - qp->hba_index); + qp->hba_index, qp->entry_repost); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; @@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", + "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, - qp->hba_index); + qp->hba_index, qp->entry_repost); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); @@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, "\t\t%s RQ info: ", rqtype); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " - "trunc:x%x rcv:x%llx]\n", + "posted:x%x rcv:x%llx]\n", qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]\n", + "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", qp->queue_id, qp->entry_count, qp->entry_size, - qp->host_index, qp->hba_index); + qp->host_index, qp->hba_index, qp->entry_repost); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]\n", + "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", datqp->queue_id, datqp->entry_count, datqp->entry_size, datqp->host_index, - datqp->hba_index); + datqp->hba_index, datqp->entry_repost); return len; } @@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " - "HOST-IDX[%04d], PORT-IDX[%04d]", + "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " + "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, - qp->host_index, qp->hba_index); + qp->host_index, qp->hba_index, qp->entry_repost); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; @@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) atomic_dec(&lpfc_debugfs_hba_count); } - debugfs_remove(lpfc_debugfs_root); /* lpfc */ - lpfc_debugfs_root = NULL; + if (atomic_read(&lpfc_debugfs_hba_count) == 0) { + debugfs_remove(lpfc_debugfs_root); /* lpfc */ + lpfc_debugfs_root = NULL; + } } #endif return; diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 9d5a379f4b15..094c97b9e5f7 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -90,6 +90,7 @@ struct lpfc_nodelist { #define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ #define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ #define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ 
+#define NLP_NVME_DISCOVERY 0x80 /* entry has NVME disc srvc */ uint16_t nlp_fc4_type; /* FC types node supports. */ /* Assigned from GID_FF, only diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 67827e397431..8e532b39ae93 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi: irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); + + /* If this is not a loop open failure, bail out */ + if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && + ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == + IOERR_LOOP_OPEN_FAILURE))) + goto flogifail; + /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); @@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { /* Check for retry */ + ndlp->fc4_prli_sent--; if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ - ndlp->fc4_prli_sent--; goto out; } + /* PRLI failed */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "2754 PRLI failure DID:%06X Status:x%x/x%x\n", + "2754 PRLI failure DID:%06X Status:x%x/x%x, " + "data: x%x\n", ndlp->nlp_DID, irsp->ulpStatus, - irsp->un.ulpWord[4]); + irsp->un.ulpWord[4], ndlp->fc4_prli_sent); + /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; @@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) */ spin_lock_irq(&phba->hbalock); pring = lpfc_phba_elsring(phba); + + /* Bail out if we've no ELS wq, like in PCI error recovery case. */ + if (unlikely(!pring)) { + spin_unlock_irq(&phba->hbalock); + return; + } + if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); @@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_do_scr_ns_plogi(phba, vport); goto out; fdisc_failed: - if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS) + if (vport->fc_vport && + (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) lpfc_vport_set_state(vport, FC_VPORT_FAILED); /* Cancel discovery timer */ lpfc_can_disctmo(vport); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 0482c5580331..3ffcd9215ca8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba) pring = lpfc_phba_elsring(phba); status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); status >>= (4*LPFC_ELS_RING); - if ((status & HA_RXMASK) || - (pring->flag & LPFC_DEFERRED_RING_EVENT) || - (phba->hba_flag & HBA_SP_QUEUE_EVT)) { + if (pring && (status & HA_RXMASK || + pring->flag & LPFC_DEFERRED_RING_EVENT || + phba->hba_flag & HBA_SP_QUEUE_EVT)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } else { - if (phba->link_state >= LPFC_LINK_UP) { + if (phba->link_state >= LPFC_LINK_UP || + phba->link_flag & LS_MDS_LOOPBACK) { pring->flag &= ~LPFC_DEFERRED_RING_EVENT; lpfc_sli_handle_slow_ring_event(phba, pring, (status & diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1d12f2be36bc..e0a5fce416ae 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy { #define LPFC_HDR_BUF_SIZE 128 #define LPFC_DATA_BUF_SIZE 2048 +#define LPFC_NVMET_DATA_BUF_SIZE 128 struct rq_context { uint32_t word0; #define 
lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ @@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe { }; #define TXRDY_PAYLOAD_LEN 12 +#define CMD_SEND_FRAME 0xE1 + +struct send_frame_wqe { + struct ulp_bde64 bde; /* words 0-2 */ + uint32_t frame_len; /* word 3 */ + uint32_t fc_hdr_wd0; /* word 4 */ + uint32_t fc_hdr_wd1; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t fc_hdr_wd2; /* word 12 */ + uint32_t fc_hdr_wd3; /* word 13 */ + uint32_t fc_hdr_wd4; /* word 14 */ + uint32_t fc_hdr_wd5; /* word 15 */ +}; union lpfc_wqe { uint32_t words[16]; @@ -4438,7 +4452,7 @@ union lpfc_wqe { struct fcp_trsp64_wqe fcp_trsp; struct fcp_tsend64_wqe fcp_tsend; struct fcp_treceive64_wqe fcp_treceive; - + struct send_frame_wqe send_frame; }; union lpfc_wqe128 { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 4b1eb98c228d..9add9473cae5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); - lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } } @@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; uint16_t i, lxri, xri_cnt, els_xri_cnt; - uint16_t nvmet_xri_cnt, tot_cnt; + uint16_t nvmet_xri_cnt; LIST_HEAD(nvmet_sgl_list); int rc; @@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba) * update on pci function's nvmet xri-sgl list */ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; - tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - if (nvmet_xri_cnt > tot_cnt) { - phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq; - nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "6301 NVMET post-sgl count changed to %d\n", - phba->cfg_nvmet_mrq_post); - } + + /* For NVMET, ALL remaining XRIs are dedicated for IO processing */ + nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { /* els xri-sgl expanded */ @@ -4546,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) pmb->vport = phba->pport; if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { + phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK); + + switch (phba->sli4_hba.link_state.status) { + case LPFC_FC_LA_TYPE_MDS_LINK_DOWN: + phba->link_flag |= LS_MDS_LINK_DOWN; + break; + case LPFC_FC_LA_TYPE_MDS_LOOPBACK: + phba->link_flag |= LS_MDS_LOOPBACK; + break; + default: + break; + } + /* Parse and translate status field */ mb = &pmb->u.mb; mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, @@ -5830,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); + /* Fast-path XRI aborted CQ Event work queue list */ INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); } @@ -5837,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* This abort list used by worker thread */ 
spin_lock_init(&phba->sli4_hba.sgl_list_lock); spin_lock_init(&phba->sli4_hba.nvmet_io_lock); + spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock); /* * Initialize driver internal slow-path work queues @@ -5951,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { if (wwn == lpfc_enable_nvmet[i]) { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + if (lpfc_nvmet_mem_alloc(phba)) + break; + + phba->nvmet_support = 1; /* a match */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6017 NVME Target %016llx\n", wwn); - phba->nvmet_support = 1; /* a match */ #else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6021 Can't enable NVME Target." " NVME_TARGET_FC infrastructure" " is not in kernel\n"); #endif + break; } } } @@ -6269,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) * * This routine is invoked to free the driver's IOCB list and memory. **/ -static void +void lpfc_free_iocb_list(struct lpfc_hba *phba) { struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; @@ -6297,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba) * 0 - successful * other values - error **/ -static int +int lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) { struct lpfc_iocbq *iocbq_entry = NULL; @@ -6525,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) uint16_t rpi_limit, curr_rpi_range; struct lpfc_dmabuf *dmabuf; struct lpfc_rpi_hdr *rpi_hdr; - uint32_t rpi_count; /* * If the SLI4 port supports extents, posting the rpi header isn't @@ -6538,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) return NULL; /* The limit on the logical index is just the max_rpi count. */ - rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + - phba->sli4_hba.max_cfg_param.max_rpi - 1; + rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi; spin_lock_irq(&phba->hbalock); /* @@ -6550,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) curr_rpi_range = phba->sli4_hba.next_rpi; spin_unlock_irq(&phba->hbalock); - /* - * The port has a limited number of rpis. The increment here - * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value - * and to allow the full max_rpi range per port. - */ - if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) - rpi_count = rpi_limit - curr_rpi_range; - else - rpi_count = LPFC_RPI_HDR_COUNT; - - if (!rpi_count) + /* Reached full RPI range */ + if (curr_rpi_range == rpi_limit) return NULL; + /* * First allocate the protocol header region for the port. The * port expects a 4KB DMA-mapped memory region that is 4K aligned. @@ -6595,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) /* The rpi_hdr stores the logical index only. */ rpi_hdr->start_rpi = curr_rpi_range; + rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT; list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); - /* - * The next_rpi stores the next logical module-64 rpi value used - * to post physical rpis in subsequent rpi postings. 
- */ - phba->sli4_hba.next_rpi += rpi_count; spin_unlock_irq(&phba->hbalock); return rpi_hdr; @@ -8172,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) /* Create NVMET Receive Queue for header */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, - phba->sli4_hba.rq_ecount); + LPFC_NVMET_RQE_DEF_COUNT); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3146 Failed allocate " @@ -8194,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) /* Create NVMET Receive Queue for data */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, - phba->sli4_hba.rq_ecount); + LPFC_NVMET_RQE_DEF_COUNT); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3156 Failed allocate " @@ -8326,46 +8328,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) } int -lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, - struct lpfc_queue *drq, int count) -{ - int rc, i; - struct lpfc_rqe hrqe; - struct lpfc_rqe drqe; - struct lpfc_rqb *rqbp; - struct rqb_dmabuf *rqb_buffer; - LIST_HEAD(rqb_buf_list); - - rqbp = hrq->rqbp; - for (i = 0; i < count; i++) { - rqb_buffer = (rqbp->rqb_alloc_buffer)(phba); - if (!rqb_buffer) - break; - rqb_buffer->hrq = hrq; - rqb_buffer->drq = drq; - list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); - } - while (!list_empty(&rqb_buf_list)) { - list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, - hbuf.list); - - hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); - hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); - drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); - drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); - rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); - if (rc < 0) { - (rqbp->rqb_free_buffer)(phba, rqb_buffer); - } else { - list_add_tail(&rqb_buffer->hbuf.list, - &rqbp->rqb_buffer_list); - rqbp->buffer_count++; - } - } - return 1; -} - -int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) { struct lpfc_rqb *rqbp; @@ -8784,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy; } - lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); - lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); - rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, phba->sli4_hba.els_cq, LPFC_USOL); if (rc) { @@ -11110,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; struct Scsi_Host *shost = NULL; - int error, cnt; + int error; uint32_t cfg_mode, intr_mode; /* Allocate memory for HBA structure */ @@ -11144,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_unset_pci_mem_s4; } - cnt = phba->cfg_iocb_cnt * 1024; - if (phba->nvmet_support) - cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq; - - /* Initialize and populate the iocb list per host */ - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2821 initialize iocb list %d total %d\n", - phba->cfg_iocb_cnt, cnt); - error = lpfc_init_iocb_list(phba, cnt); - - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1413 Failed to initialize iocb list.\n"); - goto out_unset_driver_resource_s4; - } - INIT_LIST_HEAD(&phba->active_rrq_list); INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); @@ -11168,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1414 Failed to set up driver resource.\n"); - goto out_free_iocb_list; + goto 
out_unset_driver_resource_s4; } /* Get the default values for Model Name and Description */ @@ -11268,8 +11211,6 @@ out_destroy_shost: lpfc_destroy_shost(phba); out_unset_driver_resource: lpfc_unset_driver_resource_phase2(phba); -out_free_iocb_list: - lpfc_free_iocb_list(phba); out_unset_driver_resource_s4: lpfc_sli4_driver_resource_unset(phba); out_unset_pci_mem_s4: diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 5986c7957199..fcc05a1517c2 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -214,6 +214,21 @@ fail_free_drb_pool: return -ENOMEM; } +int +lpfc_nvmet_mem_alloc(struct lpfc_hba *phba) +{ + phba->lpfc_nvmet_drb_pool = + pci_pool_create("lpfc_nvmet_drb_pool", + phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE, + SGL_ALIGN_SZ, 0); + if (!phba->lpfc_nvmet_drb_pool) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6024 Can't enable NVME Target - no memory\n"); + return -ENOMEM; + } + return 0; +} + /** * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc * @phba: HBA to free memory for @@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba) /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); + if (phba->lpfc_nvmet_drb_pool) + pci_pool_destroy(phba->lpfc_nvmet_drb_pool); + phba->lpfc_nvmet_drb_pool = NULL; if (phba->lpfc_drb_pool) pci_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; @@ -611,8 +629,6 @@ struct rqb_dmabuf * lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) { struct rqb_dmabuf *dma_buf; - struct lpfc_iocbq *nvmewqe; - union lpfc_wqe128 *wqe; dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); if (!dma_buf) @@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) kfree(dma_buf); return NULL; } - dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, - &dma_buf->dbuf.phys); + dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool, + GFP_KERNEL, &dma_buf->dbuf.phys); if (!dma_buf->dbuf.virt) { pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, dma_buf->hbuf.phys); kfree(dma_buf); return NULL; } - dma_buf->total_size = LPFC_DATA_BUF_SIZE; - - dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), - GFP_KERNEL); - if (!dma_buf->context) { - pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, - dma_buf->dbuf.phys); - pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, - dma_buf->hbuf.phys); - kfree(dma_buf); - return NULL; - } - - dma_buf->iocbq = lpfc_sli_get_iocbq(phba); - if (!dma_buf->iocbq) { - kfree(dma_buf->context); - pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, - dma_buf->dbuf.phys); - pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, - dma_buf->hbuf.phys); - kfree(dma_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, - "2621 Ran out of nvmet iocb/WQEs\n"); - return NULL; - } - dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET; - nvmewqe = dma_buf->iocbq; - wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; - /* Initialize WQE */ - memset(wqe, 0, sizeof(union lpfc_wqe)); - /* Word 7 */ - bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); - bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); - bf_set(wqe_pu, &wqe->generic.wqe_com, 1); - /* Word 10 */ - bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); - bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); - bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); - - dma_buf->iocbq->context1 = NULL; - spin_lock(&phba->sli4_hba.sgl_list_lock); - dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq); - spin_unlock(&phba->sli4_hba.sgl_list_lock); - if (!dma_buf->sglq) { - lpfc_sli_release_iocbq(phba, dma_buf->iocbq); - 
kfree(dma_buf->context); - pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, - dma_buf->dbuf.phys); - pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, - dma_buf->hbuf.phys); - kfree(dma_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, - "6132 Ran out of nvmet XRIs\n"); - return NULL; - } + dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; return dma_buf; } @@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) { - unsigned long flags; - - __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag); - dmab->sglq->state = SGL_FREED; - dmab->sglq->ndlp = NULL; - - spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); - list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list); - spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags); - - lpfc_sli_release_iocbq(phba, dmab->iocbq); - kfree(dmab->context); pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); - pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + pci_pool_free(phba->lpfc_nvmet_drb_pool, + dmab->dbuf.virt, dmab->dbuf.phys); kfree(dmab); } @@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); if (rc < 0) { (rqbp->rqb_free_buffer)(phba, rqb_entry); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6409 Cannot post to RQ %d: %x %x\n", + rqb_entry->hrq->queue_id, + rqb_entry->hrq->host_index, + rqb_entry->hrq->hba_index); } else { list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); rqbp->buffer_count++; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 8777c2d5f50d..bff3de053df4 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1944,7 +1944,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Target driver cannot solicit NVME FB. */ if (bf_get_be32(prli_tgt, nvpr)) { + /* Complete the nvme target roles. The transport + * needs to know if the rport is capable of + * discovery in addition to its role. 
+ */ ndlp->nlp_type |= NLP_NVME_TARGET; + if (bf_get_be32(prli_disc, nvpr)) + ndlp->nlp_type |= NLP_NVME_DISCOVERY; if ((bf_get_be32(prli_fba, nvpr) == 1) && (bf_get_be32(prli_fb_sz, nvpr) > 0) && (phba->cfg_nvme_enable_fb) && diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0488580eea12..074a6b5e7763 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -142,7 +142,7 @@ out: } /** - * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context + * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context * @phba: HBA buffer is associated with * @ctxp: context to clean up * @mp: Buffer to free @@ -155,24 +155,113 @@ out: * Returns: None **/ void -lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, - struct lpfc_dmabuf *mp) +lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) { - if (ctxp) { - if (ctxp->flag) - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6314 rq_post ctx xri x%x flag x%x\n", - ctxp->oxid, ctxp->flag); - - if (ctxp->txrdy) { - pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, - ctxp->txrdy_phys); - ctxp->txrdy = NULL; - ctxp->txrdy_phys = 0; +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; + struct lpfc_nvmet_tgtport *tgtp; + struct fc_frame_header *fc_hdr; + struct rqb_dmabuf *nvmebuf; + struct lpfc_dmabuf *hbufp; + uint32_t *payload; + uint32_t size, oxid, sid, rc; + unsigned long iflag; + + if (ctxp->txrdy) { + pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, + ctxp->txrdy_phys); + ctxp->txrdy = NULL; + ctxp->txrdy_phys = 0; + } + ctxp->state = LPFC_NVMET_STE_FREE; + + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + if (phba->sli4_hba.nvmet_io_wait_cnt) { + hbufp = &nvmebuf->hbuf; + list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, + nvmebuf, struct rqb_dmabuf, + hbuf.list); + phba->sli4_hba.nvmet_io_wait_cnt--; + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, + iflag); + + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + payload = (uint32_t *)(nvmebuf->dbuf.virt); + size = nvmebuf->bytes_recv; + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; + memset(ctxp, 0, sizeof(ctxp->ctx)); + ctxp->wqeq = NULL; + ctxp->txrdy = NULL; + ctxp->offset = 0; + ctxp->phba = phba; + ctxp->size = size; + ctxp->oxid = oxid; + ctxp->sid = sid; + ctxp->state = LPFC_NVMET_STE_RCV; + ctxp->entry_cnt = 1; + ctxp->flag = 0; + ctxp->ctxbuf = ctx_buf; + spin_lock_init(&ctxp->ctxlock); + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->ktime_on) { + ctxp->ts_cmd_nvme = ktime_get_ns(); + ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme; + ctxp->ts_nvme_data = 0; + ctxp->ts_data_wqput = 0; + ctxp->ts_isr_data = 0; + ctxp->ts_data_nvme = 0; + ctxp->ts_nvme_status = 0; + ctxp->ts_status_wqput = 0; + ctxp->ts_isr_status = 0; + ctxp->ts_status_nvme = 0; } - ctxp->state = LPFC_NVMET_STE_FREE; +#endif + atomic_inc(&tgtp->rcv_fcp_cmd_in); + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + * When we return from nvmet_fc_rcv_fcp_req, all relevant info + * the NVME command / FC header is stored. + * A buffer has already been reposted for this IO, so just free + * the nvmebuf. 
+ */ + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, + payload, size); + + /* Process FCP command */ + if (rc == 0) { + atomic_inc(&tgtp->rcv_fcp_cmd_out); + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + return; + } + + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", + ctxp->oxid, rc, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + + lpfc_nvmet_defer_release(phba, ctxp); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + return; } - lpfc_rq_buf_free(phba, mp); + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag); + list_add_tail(&ctx_buf->list, + &phba->sli4_hba.lpfc_nvmet_ctx_list); + phba->sli4_hba.nvmet_ctx_cnt++; + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag); +#endif } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, "6150 LS Drop IO x%x: Prep\n", ctxp->oxid); lpfc_in_buf_free(phba, &nvmebuf->dbuf); + atomic_inc(&nvmep->xmt_ls_abort); lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); return -ENOMEM; @@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, lpfc_nlp_put(nvmewqeq->context1); lpfc_in_buf_free(phba, &nvmebuf->dbuf); + atomic_inc(&nvmep->xmt_ls_abort); lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); return -ENXIO; } @@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", ctxp->oxid, rsp->op, rsp->rsplen); + ctxp->flag |= LPFC_NVMET_IO_INP; rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); if (rc == WQE_SUCCESS) { - ctxp->flag |= LPFC_NVMET_IO_INP; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (!phba->ktime_on) return 0; @@ -692,6 +783,7 @@ static void lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *rsp) { + struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; struct lpfc_nvmet_rcv_ctx *ctxp = container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); struct lpfc_hba *phba = ctxp->phba; @@ -710,10 +802,12 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, ctxp->state, 0); + atomic_inc(&lpfc_nvmep->xmt_fcp_release); + if (aborting) return; - lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } static struct nvmet_fc_target_template lpfc_tgttemplate = { @@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), }; +void +lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) +{ + struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; + unsigned long flags; + + list_for_each_entry_safe( + ctx_buf, next_ctx_buf, + &phba->sli4_hba.lpfc_nvmet_ctx_list, list) { + spin_lock_irqsave( + &phba->sli4_hba.abts_nvme_buf_list_lock, flags); + list_del_init(&ctx_buf->list); + spin_unlock_irqrestore( + &phba->sli4_hba.abts_nvme_buf_list_lock, flags); + __lpfc_clear_active_sglq(phba, + ctx_buf->sglq->sli4_lxritag); + ctx_buf->sglq->state = SGL_FREED; + ctx_buf->sglq->ndlp = NULL; + + spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags); + list_add_tail(&ctx_buf->sglq->list, + 
&phba->sli4_hba.lpfc_nvmet_sgl_list); + spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, + flags); + + lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); + kfree(ctx_buf->context); + } +} + +int +lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) +{ + struct lpfc_nvmet_ctxbuf *ctx_buf; + struct lpfc_iocbq *nvmewqe; + union lpfc_wqe128 *wqe; + int i; + + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6403 Allocate NVMET resources for %d XRIs\n", + phba->sli4_hba.nvmet_xri_cnt); + + /* For all nvmet xris, allocate resources needed to process a + * received command on a per xri basis. + */ + for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { + ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); + if (!ctx_buf) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6404 Ran out of memory for NVMET\n"); + return -ENOMEM; + } + + ctx_buf->context = kzalloc(sizeof(*ctx_buf->context), + GFP_KERNEL); + if (!ctx_buf->context) { + kfree(ctx_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6405 Ran out of NVMET " + "context memory\n"); + return -ENOMEM; + } + ctx_buf->context->ctxbuf = ctx_buf; + + ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); + if (!ctx_buf->iocbq) { + kfree(ctx_buf->context); + kfree(ctx_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6406 Ran out of NVMET iocb/WQEs\n"); + return -ENOMEM; + } + ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET; + nvmewqe = ctx_buf->iocbq; + wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; + /* Initialize WQE */ + memset(wqe, 0, sizeof(union lpfc_wqe)); + /* Word 7 */ + bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); + bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); + bf_set(wqe_pu, &wqe->generic.wqe_com, 1); + /* Word 10 */ + bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); + bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); + + ctx_buf->iocbq->context1 = NULL; + spin_lock(&phba->sli4_hba.sgl_list_lock); + ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); + spin_unlock(&phba->sli4_hba.sgl_list_lock); + if (!ctx_buf->sglq) { + lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); + kfree(ctx_buf->context); + kfree(ctx_buf); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6407 Ran out of NVMET XRIs\n"); + return -ENOMEM; + } + spin_lock(&phba->sli4_hba.nvmet_io_lock); + list_add_tail(&ctx_buf->list, + &phba->sli4_hba.lpfc_nvmet_ctx_list); + spin_unlock(&phba->sli4_hba.nvmet_io_lock); + } + phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt; + return 0; +} + int lpfc_nvmet_create_targetport(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_nvmet_tgtport *tgtp; struct nvmet_fc_port_info pinfo; - int error = 0; + int error; if (phba->targetport) return 0; + error = lpfc_nvmet_setup_io_context(phba); + if (error) + return error; + memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); @@ -772,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) &phba->pcidev->dev, &phba->targetport); #else - error = -ENOMEM; + error = -ENOENT; #endif if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, "6025 Cannot register NVME targetport " "x%x\n", error); phba->targetport = NULL; + + lpfc_nvmet_cleanup_io_context(phba); + } else { tgtp = (struct lpfc_nvmet_tgtport *) phba->targetport->private; @@ -795,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) atomic_set(&tgtp->rcv_ls_req_out, 0); atomic_set(&tgtp->rcv_ls_req_drop, 0); 
atomic_set(&tgtp->xmt_ls_abort, 0); + atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); atomic_set(&tgtp->xmt_ls_rsp, 0); atomic_set(&tgtp->xmt_ls_drop, 0); atomic_set(&tgtp->xmt_ls_rsp_error, 0); @@ -802,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) atomic_set(&tgtp->rcv_fcp_cmd_in, 0); atomic_set(&tgtp->rcv_fcp_cmd_out, 0); atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); - atomic_set(&tgtp->xmt_fcp_abort, 0); atomic_set(&tgtp->xmt_fcp_drop, 0); atomic_set(&tgtp->xmt_fcp_read_rsp, 0); atomic_set(&tgtp->xmt_fcp_read, 0); atomic_set(&tgtp->xmt_fcp_write, 0); atomic_set(&tgtp->xmt_fcp_rsp, 0); + atomic_set(&tgtp->xmt_fcp_release, 0); atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); atomic_set(&tgtp->xmt_fcp_rsp_error, 0); atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); + atomic_set(&tgtp->xmt_fcp_abort, 0); + atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); + atomic_set(&tgtp->xmt_abort_unsol, 0); + atomic_set(&tgtp->xmt_abort_sol, 0); atomic_set(&tgtp->xmt_abort_rsp, 0); atomic_set(&tgtp->xmt_abort_rsp_error, 0); - atomic_set(&tgtp->xmt_abort_cmpl, 0); } return error; } @@ -864,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { - if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) + if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; /* Check if we already received a free context call @@ -885,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { lpfc_set_rrq_active(phba, ndlp, - ctxp->rqb_buffer->sglq->sli4_lxritag, + ctxp->ctxbuf->sglq->sli4_lxritag, rxid, 1); lpfc_sli4_abts_err_handler(phba, ndlp, axri); } @@ -894,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, "6318 XB aborted %x flg x%x (%x)\n", ctxp->oxid, ctxp->flag, released); if (released) - lpfc_nvmet_rq_post(phba, ctxp, - &ctxp->rqb_buffer->hbuf); + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + if (rrq_empty) lpfc_worker_wake_up(phba); return; @@ -923,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { - if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) + if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); @@ -975,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) init_completion(&tgtp->tport_unreg_done); nvmet_fc_unregister_targetport(phba->targetport); wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); + lpfc_nvmet_cleanup_io_context(phba); } phba->targetport = NULL; #endif @@ -1010,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, oxid = 0; size = 0; sid = 0; + ctxp = NULL; goto dropit; } @@ -1104,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; struct fc_frame_header *fc_hdr; + struct lpfc_nvmet_ctxbuf *ctx_buf; uint32_t *payload; - uint32_t size, oxid, sid, rc; + uint32_t size, oxid, sid, rc, qno; + unsigned long iflag; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t id; #endif + ctx_buf = NULL; if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6157 FCP Drop IO\n"); + "6157 NVMET FCP Drop IO\n"); oxid = 0; size = 0; sid = 0; + ctxp = NULL; goto dropit; } + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag); + if (phba->sli4_hba.nvmet_ctx_cnt) { + 
list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list, + ctx_buf, struct lpfc_nvmet_ctxbuf, list); + phba->sli4_hba.nvmet_ctx_cnt--; + } + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag); - tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - payload = (uint32_t *)(nvmebuf->dbuf.virt); fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); - size = nvmebuf->bytes_recv; oxid = be16_to_cpu(fc_hdr->fh_ox_id); - sid = sli4_sid_from_fc_hdr(fc_hdr); + size = nvmebuf->bytes_recv; - ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; - if (ctxp == NULL) { - atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6158 FCP Drop IO x%x: Alloc\n", - oxid); - lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); - /* Cannot send ABTS without context */ +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { + id = smp_processor_id(); + if (id < LPFC_CHECK_CPU_CNT) + phba->cpucheck_rcv_io[id]++; + } +#endif + + lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", + oxid, size, smp_processor_id()); + + if (!ctx_buf) { + /* Queue this NVME IO to process later */ + spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); + list_add_tail(&nvmebuf->hbuf.list, + &phba->sli4_hba.lpfc_nvmet_io_wait_list); + phba->sli4_hba.nvmet_io_wait_cnt++; + phba->sli4_hba.nvmet_io_wait_total++; + spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, + iflag); + + /* Post a brand new DMA buffer to RQ */ + qno = nvmebuf->idx; + lpfc_post_rq_buffer( + phba, phba->sli4_hba.nvmet_mrq_hdr[qno], + phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); return; } + + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + payload = (uint32_t *)(nvmebuf->dbuf.virt); + sid = sli4_sid_from_fc_hdr(fc_hdr); + + ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; memset(ctxp, 0, sizeof(ctxp->ctx)); ctxp->wqeq = NULL; ctxp->txrdy = NULL; @@ -1146,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->oxid = oxid; ctxp->sid = sid; ctxp->state = LPFC_NVMET_STE_RCV; - ctxp->rqb_buffer = nvmebuf; ctxp->entry_cnt = 1; ctxp->flag = 0; + ctxp->ctxbuf = ctx_buf; spin_lock_init(&ctxp->ctxlock); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -1164,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->ts_isr_status = 0; ctxp->ts_status_nvme = 0; } - - if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { - id = smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) - phba->cpucheck_rcv_io[id]++; - } #endif - lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", - oxid, size, smp_processor_id()); - atomic_inc(&tgtp->rcv_fcp_cmd_in); /* * The calling sequence should be: * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + * When we return from nvmet_fc_rcv_fcp_req, all relevant info in + * the NVME command / FC header is stored, so we are free to repost + * the buffer. 
*/ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, payload, size); @@ -1187,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, /* Process FCP command */ if (rc == 0) { atomic_inc(&tgtp->rcv_fcp_cmd_out); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ return; } atomic_inc(&tgtp->rcv_fcp_cmd_drop); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6159 FCP Drop IO x%x: err x%x\n", - ctxp->oxid, rc); + "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", + ctxp->oxid, rc, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); dropit: lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", oxid, size, sid); if (oxid) { + lpfc_nvmet_defer_release(phba, ctxp); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ return; } - if (nvmebuf) { - nvmebuf->iocbq->hba_wqidx = 0; - /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ - lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); - } + if (ctx_buf) + lpfc_nvmet_ctxbuf_post(phba, ctx_buf); + + if (nvmebuf) + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ #endif } @@ -1258,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint64_t isr_timestamp) { if (phba->nvmet_support == 0) { - lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); return; } lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, @@ -1459,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe = ctxp->wqeq; if (nvmewqe == NULL) { /* Allocate buffer for command wqe */ - nvmewqe = ctxp->rqb_buffer->iocbq; + nvmewqe = ctxp->ctxbuf->iocbq; if (nvmewqe == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6110 lpfc_nvmet_prep_fcp_wqe: No " @@ -1486,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, return NULL; } - sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; + sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; switch (rsp->op) { case NVMET_FCOP_READDATA: case NVMET_FCOP_READDATA_RSP: @@ -1811,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - atomic_inc(&tgtp->xmt_abort_cmpl); + if (ctxp->flag & LPFC_NVMET_ABORT_OP) + atomic_inc(&tgtp->xmt_fcp_abort_cmpl); ctxp->state = LPFC_NVMET_STE_DONE; @@ -1826,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } ctxp->flag &= ~LPFC_NVMET_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); + atomic_inc(&tgtp->xmt_abort_rsp); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6165 ABORT cmpl: xri x%x flg x%x (%d) " @@ -1834,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. */ if (released) - lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + /* This is the iocbq for the abort, not the command */ lpfc_sli_release_iocbq(phba, cmdwqe); /* Since iaab/iaar are NOT set, there is no work left. 
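In the two lpfc_nvmet.c paths above, the patch replaces per-command context allocation with a fixed pool: lpfc_nvmet_unsol_fcp_buffer takes a pre-built context from lpfc_nvmet_ctx_list when one is free and otherwise parks the received DMA buffer on lpfc_nvmet_io_wait_list (reposting a fresh buffer to the RQ so reception never stalls), while lpfc_nvmet_ctxbuf_post hands a freed context straight to a parked command before falling back to returning it to the pool. What follows is a minimal userspace sketch of that handoff only — struct ctx_pool, ctx_get_or_wait() and ctx_put() are illustrative names, and a pthread mutex stands in for the driver's nvmet_io_lock/nvmet_io_wait_lock spinlocks:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct ctx_pool {
	pthread_mutex_t lock;   /* stands in for the two driver spinlocks */
	struct node *free_ctx;  /* models lpfc_nvmet_ctx_list */
	struct node *waiters;   /* models lpfc_nvmet_io_wait_list */
};

/* Receive path: take a context if one is free, otherwise park the request. */
struct node *ctx_get_or_wait(struct ctx_pool *p, struct node *req)
{
	struct node *ctx = NULL;

	pthread_mutex_lock(&p->lock);
	if (p->free_ctx) {
		ctx = p->free_ctx;
		p->free_ctx = ctx->next;
	} else {
		req->next = p->waiters;  /* LIFO here; the driver keeps FIFO order */
		p->waiters = req;
	}
	pthread_mutex_unlock(&p->lock);
	return ctx;
}

/*
 * Release path: if a request is parked, the context is reused for it at
 * once and the caller re-runs the receive logic, as lpfc_nvmet_ctxbuf_post
 * does; only when nothing waits does the context go back on the free list.
 */
struct node *ctx_put(struct ctx_pool *p, struct node *ctx)
{
	struct node *req = NULL;

	pthread_mutex_lock(&p->lock);
	if (p->waiters) {
		req = p->waiters;
		p->waiters = req->next;
	} else {
		ctx->next = p->free_ctx;
		p->free_ctx = ctx;
	}
	pthread_mutex_unlock(&p->lock);
	return req;
}

Servicing the waiter inside the release path, rather than deferring to a worker thread, is what lets the port absorb command bursts larger than the context pool without dropping frames.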
@@ -1876,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - atomic_inc(&tgtp->xmt_abort_cmpl); + if (ctxp->flag & LPFC_NVMET_ABORT_OP) + atomic_inc(&tgtp->xmt_fcp_abort_cmpl); if (!ctxp) { /* if context is clear, related io alrady complete */ @@ -1906,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } ctxp->flag &= ~LPFC_NVMET_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); + atomic_inc(&tgtp->xmt_abort_rsp); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6316 ABTS cmpl xri x%x flg x%x (%x) " @@ -1913,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ctxp->oxid, ctxp->flag, released, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); + + cmdwqe->context2 = NULL; + cmdwqe->context3 = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. */ if (released) - lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); - - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); /* Since iaab/iaar are NOT set, there is no work left. * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted @@ -1952,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - atomic_inc(&tgtp->xmt_abort_cmpl); + atomic_inc(&tgtp->xmt_ls_abort_cmpl); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", @@ -1983,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, sid, xri, ctxp->wqeq->sli4_xritag); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - if (!ctxp->wqeq) { - ctxp->wqeq = ctxp->rqb_buffer->iocbq; - ctxp->wqeq->hba_wqidx = 0; - } ndlp = lpfc_findnode_did(phba->pport, sid); if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || @@ -2082,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctxp->wqeq) { - ctxp->wqeq = ctxp->rqb_buffer->iocbq; + ctxp->wqeq = ctxp->ctxbuf->iocbq; ctxp->wqeq->hba_wqidx = 0; } @@ -2103,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* Issue ABTS for this WQE based on iotag */ ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->abort_wqeq) { + atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, "6161 ABORT failed: No wqeqs: " "xri: x%x\n", ctxp->oxid); @@ -2127,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* driver queued commands are in process of being flushed */ if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); + atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME, "6163 Driver in reset cleanup - flushing " "NVME Req now. 
hba_flag x%x oxid x%x\n", @@ -2139,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* Outstanding abort is in progress */ if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); + atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME, "6164 Outstanding NVME I/O Abort Request " "still pending on oxid x%x\n", @@ -2189,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, abts_wqeq->context2 = ctxp; rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); - if (rc == WQE_SUCCESS) + if (rc == WQE_SUCCESS) { + atomic_inc(&tgtp->xmt_abort_sol); return 0; + } + atomic_inc(&tgtp->xmt_abort_rsp_error); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; lpfc_sli_release_iocbq(phba, abts_wqeq); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, @@ -2214,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; if (!ctxp->wqeq) { - ctxp->wqeq = ctxp->rqb_buffer->iocbq; + ctxp->wqeq = ctxp->ctxbuf->iocbq; ctxp->wqeq->hba_wqidx = 0; } @@ -2230,11 +2483,11 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { - atomic_inc(&tgtp->xmt_abort_rsp); return 0; } aerr: + atomic_inc(&tgtp->xmt_abort_rsp_error); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, @@ -2269,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, } abts_wqeq = ctxp->wqeq; wqe_abts = &abts_wqeq->wqe; + lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); spin_lock_irqsave(&phba->hbalock, flags); @@ -2278,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { - atomic_inc(&tgtp->xmt_abort_rsp); + atomic_inc(&tgtp->xmt_abort_unsol); return 0; } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 128759fe6650..6eb2f5d8d4ed 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -22,6 +22,7 @@ ********************************************************************/ #define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ +#define LPFC_NVMET_RQE_DEF_COUNT 512 #define LPFC_NVMET_SUCCESS_LEN 12 /* Used for NVME Target */ @@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport { atomic_t rcv_ls_req_out; atomic_t rcv_ls_req_drop; atomic_t xmt_ls_abort; + atomic_t xmt_ls_abort_cmpl; /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ atomic_t xmt_ls_rsp; @@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport { atomic_t rcv_fcp_cmd_in; atomic_t rcv_fcp_cmd_out; atomic_t rcv_fcp_cmd_drop; + atomic_t xmt_fcp_release; /* Stats counters - lpfc_nvmet_xmt_fcp_op */ - atomic_t xmt_fcp_abort; atomic_t xmt_fcp_drop; atomic_t xmt_fcp_read_rsp; atomic_t xmt_fcp_read; @@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport { atomic_t xmt_fcp_rsp_drop; - /* Stats counters - lpfc_nvmet_unsol_issue_abort */ + /* Stats counters - lpfc_nvmet_xmt_fcp_abort */ + atomic_t xmt_fcp_abort; + atomic_t xmt_fcp_abort_cmpl; + atomic_t xmt_abort_sol; + atomic_t xmt_abort_unsol; atomic_t xmt_abort_rsp; atomic_t xmt_abort_rsp_error; - - /* Stats counters - lpfc_nvmet_xmt_abort_cmp */ - atomic_t xmt_abort_cmpl; }; struct lpfc_nvmet_rcv_ctx { @@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx { #define 
LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ struct rqb_dmabuf *rqb_buffer; + struct lpfc_nvmet_ctxbuf *ctxbuf; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint64_t ts_isr_cmd; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2a4fc00dfa9b..d6b184839bc2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, struct lpfc_iocbq *); static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); +static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf); static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_cqe *); static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, @@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, if (unlikely(!hq) || unlikely(!dq)) return -ENOMEM; put_index = hq->host_index; - temp_hrqe = hq->qe[hq->host_index].rqe; + temp_hrqe = hq->qe[put_index].rqe; temp_drqe = dq->qe[dq->host_index].rqe; if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) return -EINVAL; - if (hq->host_index != dq->host_index) + if (put_index != dq->host_index) return -EINVAL; /* If the host has not yet processed the next entry then we are done */ - if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) + if (((put_index + 1) % hq->entry_count) == hq->hba_index) return -EBUSY; lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); /* Update the host index to point to the next slot */ - hq->host_index = ((hq->host_index + 1) % hq->entry_count); + hq->host_index = ((put_index + 1) % hq->entry_count); dq->host_index = ((dq->host_index + 1) % dq->entry_count); + hq->RQ_buf_posted++; /* Ring The Header Receive Queue Doorbell */ if (!(hq->host_index % hq->entry_repost)) { @@ -5906,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, bf_set(lpfc_mbx_set_feature_mds, &mbox->u.mqe.un.set_feature, 1); bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, - &mbox->u.mqe.un.set_feature, 0); + &mbox->u.mqe.un.set_feature, 1); mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; mbox->u.mqe.un.set_feature.param_len = 8; break; @@ -6512,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); } +int +lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, int count, int idx) +{ + int rc, i; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + struct lpfc_rqb *rqbp; + struct rqb_dmabuf *rqb_buffer; + LIST_HEAD(rqb_buf_list); + + rqbp = hrq->rqbp; + for (i = 0; i < count; i++) { + /* IF RQ is already full, don't bother */ + if (rqbp->buffer_count + i >= rqbp->entry_count - 1) + break; + rqb_buffer = rqbp->rqb_alloc_buffer(phba); + if (!rqb_buffer) + break; + rqb_buffer->hrq = hrq; + rqb_buffer->drq = drq; + rqb_buffer->idx = idx; + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); + } + while (!list_empty(&rqb_buf_list)) { + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, + hbuf.list); + + hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); + hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); + drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); + drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); + rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); + if (rc < 0) { + rqbp->rqb_free_buffer(phba, rqb_buffer); + } else { + list_add_tail(&rqb_buffer->hbuf.list, + &rqbp->rqb_buffer_list); + rqbp->buffer_count++; + } + } + return 1; +} + /** * lpfc_sli4_hba_setup - SLI4 device initialization PCI function * @phba: Pointer to HBA context object. @@ -6524,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { - int rc, i; + int rc, i, cnt; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; @@ -6875,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_destroy_queue; } phba->sli4_hba.nvmet_xri_cnt = rc; + + cnt = phba->cfg_iocb_cnt * 1024; + /* We need 1 iocbq for every SGL, for IO processing */ + cnt += phba->sli4_hba.nvmet_xri_cnt; + /* Initialize and populate the iocb list per host */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2821 initialize iocb list %d total %d\n", + phba->cfg_iocb_cnt, cnt); + rc = lpfc_init_iocb_list(phba, cnt); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1413 Failed to init iocb list.\n"); + goto out_destroy_queue; + } + lpfc_nvmet_create_targetport(phba); } else { /* update host scsi xri-sgl sizes and mappings */ @@ -6894,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "and mapping: %d\n", rc); goto out_destroy_queue; } + + cnt = phba->cfg_iocb_cnt * 1024; + /* Initialize and populate the iocb list per host */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2820 initialize iocb list %d total %d\n", + phba->cfg_iocb_cnt, cnt); + rc = lpfc_init_iocb_list(phba, cnt); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6301 Failed to init iocb list.\n"); + goto out_destroy_queue; + } } if (phba->nvmet_support && phba->cfg_nvmet_mrq) { - /* Post initial buffers to all RQs created */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; INIT_LIST_HEAD(&rqbp->rqb_buffer_list); rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; - rqbp->entry_count = 256; + rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; rqbp->buffer_count = 0; - /* Divide by 4 and round down to multiple of 16 */ - rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8; - phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc; - phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc; - lpfc_post_rq_buffer( phba, phba->sli4_hba.nvmet_mrq_hdr[i], phba->sli4_hba.nvmet_mrq_data[i], - phba->cfg_nvmet_mrq_post); + LPFC_NVMET_RQE_DEF_COUNT, i); } } @@ 
-7082,6 +7150,7 @@ out_unset_queue: /* Unset all the queues set up in this routine when error out */ lpfc_sli4_queue_unset(phba); out_destroy_queue: + lpfc_free_iocb_list(phba); lpfc_sli4_queue_destroy(phba); out_stop_timers: lpfc_stop_hba_timers(phba); @@ -8621,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Some of the fields are in the right position already */ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); - wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ - wqe->generic.wqe_com.word10 = 0; + if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { + /* The ct field has moved so reset */ + wqe->generic.wqe_com.word7 = 0; + wqe->generic.wqe_com.word10 = 0; + } abort_tag = (uint32_t) iocbq->iotag; xritag = iocbq->sli4_xritag; @@ -9116,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, } break; + case CMD_SEND_FRAME: + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); + bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); + return 0; case CMD_XRI_ABORTED_CX: case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ @@ -12788,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) struct fc_frame_header *fc_hdr; struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; struct lpfc_queue *drq = phba->sli4_hba.dat_rq; + struct lpfc_nvmet_tgtport *tgtp; struct hbq_dmabuf *dma_buf; uint32_t status, rq_id; unsigned long iflags; @@ -12808,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2537 Receive Frame Truncated!!\n"); - hrq->RQ_buf_trunc++; case FC_STATUS_RQ_SUCCESS: lpfc_sli4_rq_release(hrq, drq); spin_lock_irqsave(&phba->hbalock, iflags); @@ -12819,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) goto out; } hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); /* If a NVME LS event (type 0x28), treat it as Fast path */ @@ -12832,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; - case FC_STATUS_INSUFF_BUF_NEED_BUF: case FC_STATUS_INSUFF_BUF_FRM_DISC: + if (phba->nvmet_support) { + tgtp = phba->targetport->private; + lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, + "6402 RQE Error x%x, posted %d err_cnt " + "%d: %x %x %x\n", + status, hrq->RQ_buf_posted, + hrq->RQ_no_posted_buf, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + } + /* fallthrough */ + + case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ spin_lock_irqsave(&phba->hbalock, iflags); @@ -12951,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, while ((cqe = lpfc_sli4_cq_get(cq))) { workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); if (!(++ecount % cq->entry_repost)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + break; cq->CQ_mbox++; } break; @@ -12965,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + break; } /* Track the max number of CQEs processed in 1 EQ */ @@ -13135,6 +13225,7 @@ 
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_queue *drq; struct rqb_dmabuf *dma_buf; struct fc_frame_header *fc_hdr; + struct lpfc_nvmet_tgtport *tgtp; uint32_t status, rq_id; unsigned long iflags; uint32_t fctl, idx; @@ -13165,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6126 Receive Frame Truncated!!\n"); - hrq->RQ_buf_trunc++; - break; case FC_STATUS_RQ_SUCCESS: lpfc_sli4_rq_release(hrq, drq); spin_lock_irqsave(&phba->hbalock, iflags); @@ -13178,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, } spin_unlock_irqrestore(&phba->hbalock, iflags); hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; /* Just some basic sanity checks on FCP Command frame */ @@ -13200,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, drop: lpfc_in_buf_free(phba, &dma_buf->dbuf); break; - case FC_STATUS_INSUFF_BUF_NEED_BUF: case FC_STATUS_INSUFF_BUF_FRM_DISC: + if (phba->nvmet_support) { + tgtp = phba->targetport->private; + lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, + "6401 RQE Error x%x, posted %d err_cnt " + "%d: %x %x %x\n", + status, hrq->RQ_buf_posted, + hrq->RQ_no_posted_buf, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + } + /* fallthrough */ + + case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; break; } out: @@ -13361,7 +13460,7 @@ process_cq: while ((cqe = lpfc_sli4_cq_get(cq))) { workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + break; } /* Track the max number of CQEs processed in 1 EQ */ @@ -13452,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) while ((cqe = lpfc_sli4_cq_get(cq))) { workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); if (!(++ecount % cq->entry_repost)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + break; } /* Track the max number of CQEs processed in 1 EQ */ @@ -13534,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id) while ((eqe = lpfc_sli4_eq_get(eq))) { lpfc_sli4_fof_handle_eqe(phba, eqe); if (!(++ecount % eq->entry_repost)) - lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); + break; eq->EQ_processed++; } @@ -13651,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); if (!(++ecount % fpeq->entry_repost)) - lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); + break; fpeq->EQ_processed++; } @@ -13832,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, } queue->entry_size = entry_size; queue->entry_count = entry_count; - - /* - * entry_repost is calculated based on the number of entries in the - * queue. This works out except for RQs. If buffers are NOT initially - * posted for every RQE, entry_repost should be adjusted accordingly. 
- */ - queue->entry_repost = (entry_count >> 3); - if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) - queue->entry_repost = LPFC_QUEUE_MIN_REPOST; queue->phba = phba; + /* entry_repost will be set during q creation */ + return queue; out_fail: lpfc_sli4_queue_free(queue); @@ -14073,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) status = -ENXIO; eq->host_index = 0; eq->hba_index = 0; + eq->entry_repost = LPFC_EQ_REPOST; mempool_free(mbox, phba->mbox_mem_pool); return status; @@ -14146,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0361 Unsupported CQ count: " - "entry cnt %d sz %d pg cnt %d repost %d\n", + "entry cnt %d sz %d pg cnt %d\n", cq->entry_count, cq->entry_size, - cq->page_count, cq->entry_repost); + cq->page_count); if (cq->entry_count < 256) { status = -EINVAL; goto out; @@ -14201,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->assoc_qid = eq->queue_id; cq->host_index = 0; cq->hba_index = 0; + cq->entry_repost = LPFC_CQ_REPOST; out: mempool_free(mbox, phba->mbox_mem_pool); @@ -14392,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, cq->assoc_qid = eq->queue_id; cq->host_index = 0; cq->hba_index = 0; + cq->entry_repost = LPFC_CQ_REPOST; rc = 0; list_for_each_entry(dmabuf, &cq->page_list, list) { @@ -14640,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, mq->subtype = subtype; mq->host_index = 0; mq->hba_index = 0; + mq->entry_repost = LPFC_MQ_REPOST; /* link the mq onto the parent cq child list */ list_add_tail(&mq->list, &cq->child_list); @@ -14865,34 +14961,6 @@ out: } /** - * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ - * @phba: HBA structure that indicates port to create a queue on. - * @rq: The queue structure to use for the receive queue. - * @qno: The associated HBQ number - * - * - * For SLI4 we need to adjust the RQ repost value based on - * the number of buffers that are initially posted to the RQ. - */ -void -lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) -{ - uint32_t cnt; - - /* sanity check on queue memory */ - if (!rq) - return; - cnt = lpfc_hbq_defs[qno]->entry_count; - - /* Recalc repost for RQs based on buffers initially posted */ - cnt = (cnt >> 3); - if (cnt < LPFC_QUEUE_MIN_REPOST) - cnt = LPFC_QUEUE_MIN_REPOST; - - rq->entry_repost = cnt; -} - -/** * lpfc_rq_create - Create a Receive Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @hrq: The queue structure to use to create the header receive queue. 
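The queue-allocation hunks above stop deriving entry_repost from entry_count >> 3 and instead assign a fixed threshold per queue type (LPFC_EQ_REPOST, LPFC_MQ_REPOST, LPFC_CQ_REPOST, LPFC_RQ_REPOST) when each queue is created, which is also why lpfc_rq_adjust_repost disappears. Together with the put_index rework in lpfc_sli4_rq_put earlier in the patch, the effect is that the receive-queue doorbell is rung once per batch of posted entries rather than recomputed from initial buffer counts. Below is a compilable sketch of that circular put-and-batch pattern — struct ring and the no-op ring_doorbell() are stand-ins, not driver types:

#include <stdint.h>
#include <string.h>

#define RQ_REPOST 64	/* mirrors LPFC_RQ_REPOST */

struct ring {
	uint32_t host_index;   /* next slot the host fills */
	uint32_t hba_index;    /* next slot the hardware consumes */
	uint32_t entry_count;
	uint32_t entry_size;
	uint8_t *qe;           /* entry_count * entry_size bytes */
};

static void ring_doorbell(struct ring *rq, uint32_t count)
{
	(void)rq; (void)count;  /* real code writes the RQ doorbell register */
}

/* Returns the slot used, or -1 if the ring is full. */
int rq_put(struct ring *rq, const void *entry)
{
	uint32_t put_index = rq->host_index;

	/* Full when advancing would collide with the hardware's index. */
	if ((put_index + 1) % rq->entry_count == rq->hba_index)
		return -1;

	memcpy(rq->qe + (size_t)put_index * rq->entry_size,
	       entry, rq->entry_size);
	rq->host_index = (put_index + 1) % rq->entry_count;

	/* Ring the doorbell once per RQ_REPOST entries, not per entry. */
	if (rq->host_index % RQ_REPOST == 0)
		ring_doorbell(rq, RQ_REPOST);
	return (int)put_index;
}

Each doorbell is an MMIO write, so batching it amortizes the cost across RQ_REPOST postings; reading put_index once up front, as the reworked lpfc_sli4_rq_put does, also avoids re-fetching host_index between the validity checks and the copy.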
@@ -15077,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; + hrq->entry_repost = LPFC_RQ_REPOST; /* now create the data queue */ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, @@ -15087,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { bf_set(lpfc_rq_context_rqe_count_1, &rq_create->u.request.context, hrq->entry_count); - rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; + if (subtype == LPFC_NVMET) + rq_create->u.request.context.buffer_size = + LPFC_NVMET_DATA_BUF_SIZE; + else + rq_create->u.request.context.buffer_size = + LPFC_DATA_BUF_SIZE; bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, LPFC_RQE_SIZE_8); bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, @@ -15124,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, LPFC_RQ_RING_SIZE_4096); break; } - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_DATA_BUF_SIZE); + if (subtype == LPFC_NVMET) + bf_set(lpfc_rq_context_buf_size, + &rq_create->u.request.context, + LPFC_NVMET_DATA_BUF_SIZE); + else + bf_set(lpfc_rq_context_buf_size, + &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); @@ -15158,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; + drq->entry_repost = LPFC_RQ_REPOST; /* link the header and data RQs onto the parent cq child list */ list_add_tail(&hrq->list, &cq->child_list); @@ -15270,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, cq->queue_id); bf_set(lpfc_rq_context_data_size, &rq_create->u.request.context, - LPFC_DATA_BUF_SIZE); + LPFC_NVMET_DATA_BUF_SIZE); bf_set(lpfc_rq_context_hdr_size, &rq_create->u.request.context, LPFC_HDR_BUF_SIZE); @@ -15315,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; + hrq->entry_repost = LPFC_RQ_REPOST; drq->db_format = LPFC_DB_RING_FORMAT; drq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -15323,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; + drq->entry_repost = LPFC_RQ_REPOST; list_add_tail(&hrq->list, &cq->child_list); list_add_tail(&drq->list, &cq->child_list); @@ -16063,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) struct fc_vft_header *fc_vft_hdr; uint32_t *header = (uint32_t *) fc_hdr; +#define FC_RCTL_MDS_DIAGS 0xF4 + switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ case FC_RCTL_DD_SOL_DATA: /* solicited data */ @@ -16090,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) case FC_RCTL_F_BSY: /* fabric busy to data frame */ case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ case FC_RCTL_LCR: /* link credit reset */ + case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ case FC_RCTL_END: /* end */ break; case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ @@ -16099,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) default: goto drop; } + +#define FC_TYPE_VENDOR_UNIQUE 0xFF + switch (fc_hdr->fh_type) { case FC_TYPE_BLS: case FC_TYPE_ELS: case FC_TYPE_FCP: case 
FC_TYPE_CT: case FC_TYPE_NVME: + case FC_TYPE_VENDOR_UNIQUE: break; case FC_TYPE_IP: case FC_TYPE_ILS: @@ -16115,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "2538 Received frame rctl:%s (x%x), type:%s (x%x), " "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" : lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, - lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type, - be32_to_cpu(header[0]), be32_to_cpu(header[1]), - be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5]), - be32_to_cpu(header[6])); + (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ? + "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type], + fc_hdr->fh_type, be32_to_cpu(header[0]), + be32_to_cpu(header[1]), be32_to_cpu(header[2]), + be32_to_cpu(header[3]), be32_to_cpu(header[4]), + be32_to_cpu(header[5]), be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -16926,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, lpfc_sli_release_iocbq(phba, iocbq); } +static void +lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_dmabuf *pcmd = cmdiocb->context2; + + if (pcmd && pcmd->virt) + pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); + kfree(pcmd); + lpfc_sli_release_iocbq(phba, cmdiocb); +} + +static void +lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, + struct hbq_dmabuf *dmabuf) +{ + struct fc_frame_header *fc_hdr; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *iocbq = NULL; + union lpfc_wqe *wqe; + struct lpfc_dmabuf *pcmd = NULL; + uint32_t frame_len; + int rc; + + fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); + + /* Send the received frame back */ + iocbq = lpfc_sli_get_iocbq(phba); + if (!iocbq) + goto exit; + + /* Allocate buffer for command payload */ + pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (pcmd) + pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &pcmd->phys); + if (!pcmd || !pcmd->virt) + goto exit; + + INIT_LIST_HEAD(&pcmd->list); + + /* copyin the payload */ + memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); + + /* fill in BDE's for command */ + iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); + iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); + iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; + iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; + + iocbq->context2 = pcmd; + iocbq->vport = vport; + iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; + iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + + /* + * Setup rest of the iocb as though it were a WQE + * Build the SEND_FRAME WQE + */ + wqe = (union lpfc_wqe *)&iocbq->iocb; + + wqe->send_frame.frame_len = frame_len; + wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); + wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); + wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); + wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); + wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); + wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); + + iocbq->iocb.ulpCommand = CMD_SEND_FRAME; + iocbq->iocb.ulpLe = 1; + iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 
+ if (rc == IOCB_ERROR) + goto exit; + + lpfc_in_buf_free(phba, &dmabuf->dbuf); + return; + +exit: + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2023 Unable to process MDS loopback frame\n"); + if (pcmd && pcmd->virt) + pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); + kfree(pcmd); + lpfc_sli_release_iocbq(phba, iocbq); + lpfc_in_buf_free(phba, &dmabuf->dbuf); +} + /** * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware * @phba: Pointer to HBA context object. @@ -16964,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); + if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { + vport = phba->pport; + /* Handle MDS Loopback frames */ + lpfc_sli4_handle_mds_loopback(vport, dmabuf); + return; + } + /* d_id this frame is directed to */ did = sli4_did_from_fc_hdr(fc_hdr); @@ -17137,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); rc = -ENXIO; + } else { + /* + * The next_rpi stores the next logical module-64 rpi value used + * to post physical rpis in subsequent rpi postings. + */ + spin_lock_irq(&phba->hbalock); + phba->sli4_hba.next_rpi = rpi_page->next_rpi; + spin_unlock_irq(&phba->hbalock); } return rc; } @@ -18717,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, spin_lock_irqsave(&pring->ring_lock, iflags); ctxp = pwqe->context2; - sglq = ctxp->rqb_buffer->sglq; + sglq = ctxp->ctxbuf->sglq; if (pwqe->sli4_xritag == NO_XRI) { pwqe->sli4_lxritag = sglq->sli4_lxritag; pwqe->sli4_xritag = sglq->sli4_xritag; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index da46471337c8..cf863db27700 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -24,7 +24,6 @@ #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 -#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 #define LPFC_RPI_LOW_WATER_MARK 10 #define LPFC_UNREG_FCF 1 @@ -155,7 +154,11 @@ struct lpfc_queue { uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. 
*/ uint32_t entry_repost; /* Count of entries before doorbell is rung */ -#define LPFC_QUEUE_MIN_REPOST 8 +#define LPFC_EQ_REPOST 8 +#define LPFC_MQ_REPOST 8 +#define LPFC_CQ_REPOST 64 +#define LPFC_RQ_REPOST 64 +#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */ uint32_t queue_id; /* Queue ID assigned by the hardware */ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ uint32_t page_count; /* Number of pages allocated for this queue */ @@ -195,7 +198,7 @@ struct lpfc_queue { /* defines for RQ stats */ #define RQ_no_posted_buf q_cnt_1 #define RQ_no_buf_found q_cnt_2 -#define RQ_buf_trunc q_cnt_3 +#define RQ_buf_posted q_cnt_3 #define RQ_rcv_buf q_cnt_4 uint64_t isr_timestamp; @@ -617,12 +620,17 @@ struct lpfc_sli4_hba { uint16_t scsi_xri_start; uint16_t els_xri_cnt; uint16_t nvmet_xri_cnt; + uint16_t nvmet_ctx_cnt; + uint16_t nvmet_io_wait_cnt; + uint16_t nvmet_io_wait_total; struct list_head lpfc_els_sgl_list; struct list_head lpfc_abts_els_sgl_list; struct list_head lpfc_nvmet_sgl_list; struct list_head lpfc_abts_nvmet_ctx_list; struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_abts_nvme_buf_list; + struct list_head lpfc_nvmet_ctx_list; + struct list_head lpfc_nvmet_io_wait_list; struct lpfc_sglq **lpfc_sglq_active_list; struct list_head lpfc_rpi_hdr_list; unsigned long *rpi_bmask; @@ -654,6 +662,7 @@ struct lpfc_sli4_hba { spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t sgl_list_lock; /* list of aborted els IOs */ spinlock_t nvmet_io_lock; + spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */ uint32_t physical_port; /* CPU to vector mapping information */ @@ -661,8 +670,6 @@ struct lpfc_sli4_hba { uint16_t num_online_cpu; uint16_t num_present_cpu; uint16_t curr_disp_cpu; - - uint16_t nvmet_mrq_post_idx; }; enum lpfc_sge_type { @@ -698,6 +705,7 @@ struct lpfc_rpi_hdr { struct lpfc_dmabuf *dmabuf; uint32_t page_count; uint32_t start_rpi; + uint16_t next_rpi; }; struct lpfc_rsrc_blks { @@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, struct lpfc_queue **drqp, struct lpfc_queue **cqp, uint32_t subtype); -void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 1c26dc67151b..c2653244221c 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.2.0.12" +#define LPFC_DRIVER_VERSION "11.2.0.14" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index e31f1cc90b81..99e16ac479e3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1851,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req) /* zero out the cmd, except for the embedded scsi_request */ memset((char *)cmd + sizeof(cmd->req), 0, - sizeof(*cmd) - sizeof(cmd->req)); + sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size); req->special = cmd; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index f9d1432d7cc5..b6bb4e0ce0e3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); + int ret; if (!(rq->cmd_flags & REQ_NOUNMAP)) { switch (sdkp->zeroing_mode) { case SD_ZERO_WS16_UNMAP: - return sd_setup_write_same16_cmnd(cmd, true); + ret = sd_setup_write_same16_cmnd(cmd, true); + goto out; case SD_ZERO_WS10_UNMAP: - return sd_setup_write_same10_cmnd(cmd, true); + ret = sd_setup_write_same10_cmnd(cmd, true); + goto out; } } if (sdp->no_write_same) return BLKPREP_INVALID; + if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) - return sd_setup_write_same16_cmnd(cmd, false); - return sd_setup_write_same10_cmnd(cmd, false); + ret = sd_setup_write_same16_cmnd(cmd, false); + else + ret = sd_setup_write_same10_cmnd(cmd, false); + +out: + if (sd_is_zoned(sdkp) && ret == BLKPREP_OK) + return sd_zbc_write_lock_zone(cmd); + + return ret; } static void sd_config_write_same(struct scsi_disk *sdkp) @@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) rq->__data_len = sdp->sector_size; ret = scsi_init_io(cmd); rq->__data_len = nr_bytes; + + if (sd_is_zoned(sdkp) && ret != BLKPREP_OK) + sd_zbc_write_unlock_zone(cmd); + return ret; } @@ -1567,17 +1582,21 @@ out: return retval; } -static int sd_sync_cache(struct scsi_disk *sdkp) +static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { int retries, res; struct scsi_device *sdp = sdkp->device; const int timeout = sdp->request_queue->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; - struct scsi_sense_hdr sshdr; + struct scsi_sense_hdr my_sshdr; if (!scsi_device_online(sdp)) return -ENODEV; + /* caller might not be interested in sense, but we need it */ + if (!sshdr) + sshdr = &my_sshdr; + for (retries = 3; retries > 0; --retries) { unsigned char cmd[10] = { 0 }; @@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. 
*/ - res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr, timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); if (res == 0) break; @@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp) sd_print_result(sdkp, "Synchronize Cache(10) failed", res); if (driver_byte(res) & DRIVER_SENSE) - sd_print_sense_hdr(sdkp, &sshdr); + sd_print_sense_hdr(sdkp, sshdr); + /* we need to evaluate the error return */ - if (scsi_sense_valid(&sshdr) && - (sshdr.asc == 0x3a || /* medium not present */ - sshdr.asc == 0x20)) /* invalid command */ + if (scsi_sense_valid(sshdr) && + (sshdr->asc == 0x3a || /* medium not present */ + sshdr->asc == 0x20)) /* invalid command */ /* this is no error here */ return 0; @@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev) if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); - sd_sync_cache(sdkp); + sd_sync_cache(sdkp, NULL); } if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { @@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev) static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) { struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_sense_hdr sshdr; int ret = 0; if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ @@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); - ret = sd_sync_cache(sdkp); + ret = sd_sync_cache(sdkp, &sshdr); + if (ret) { /* ignore OFFLINE device */ if (ret == -ENODEV) - ret = 0; - goto done; + return 0; + + if (!scsi_sense_valid(&sshdr) || + sshdr.sense_key != ILLEGAL_REQUEST) + return ret; + + /* + * sshdr.sense_key == ILLEGAL_REQUEST means this drive + * doesn't support sync. There's not much to do and + * suspend shouldn't fail. 
+ */ + ret = 0; } } @@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) ret = 0; } -done: return ret; } diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0a38ba01b7b4..82c33a6edbea 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ - break; + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + return NULL; } /* always adds to end of list */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index abc7e87937cc..ffe8d8608818 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba) ufshcd_add_spm_lvl_sysfs_nodes(hba); } +static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba) +{ + device_remove_file(hba->dev, &hba->rpm_lvl_attr); + device_remove_file(hba->dev, &hba->spm_lvl_attr); +} + /** * ufshcd_shutdown - shutdown routine * @hba: per adapter instance @@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown); */ void ufshcd_remove(struct ufs_hba *hba) { + ufshcd_remove_sysfs_nodes(hba); scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig index ab08af4654ef..42c098e86f84 100644 --- a/drivers/thermal/broadcom/Kconfig +++ b/drivers/thermal/broadcom/Kconfig @@ -9,8 +9,9 @@ config BCM2835_THERMAL config BCM_NS_THERMAL tristate "Northstar thermal driver" depends on ARCH_BCM_IPROC || COMPILE_TEST + default y if ARCH_BCM_IPROC help - Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081, - BCM4709 and BCM47094. It contains DMU (Device Management Unit) block - with a thermal sensor that allows checking CPU temperature. This - driver provides support for it. + Support for the Northstar and Northstar Plus family of SoCs (e.g. + BCM4708, BCM4709, BCM5301x, BCM95852X, etc). It contains DMU (Device + Management Unit) block with a thermal sensor that allows checking CPU + temperature. diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 644ba526d9ea..4362a69ac88d 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = { static int qoriq_tmu_probe(struct platform_device *pdev) { int ret; - const struct thermal_trip *trip; struct qoriq_tmu_data *data; struct device_node *np = pdev->dev.of_node; u32 site = 0; @@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev) goto err_tmu; } - trip = of_thermal_get_trip_points(data->tz); - /* Enable monitoring */ site |= 0x1 << (15 - data->sensor_id); tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index b21b9cc2c8d6..5a51c740e372 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work, * This may be called from any critical situation to trigger a system shutdown * after a known period of time. By default this is not scheduled. 
*/ -void thermal_emergency_poweroff(void) +static void thermal_emergency_poweroff(void) { int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; /* diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index ba9c302454fb..696ab3046b87 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) } /** - * ti_bandgap_set_continous_mode() - One time enabling of continuous mode + * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode * @bgp: pointer to struct ti_bandgap * * Call this function only if HAS(MODE_CONFIG) is set. As this driver may @@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev) } bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); - if (!bgp) { - dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); + if (!bgp) return ERR_PTR(-ENOMEM); - } of_id = of_match_device(of_ti_bandgap_match, &pdev->dev); if (of_id) bgp->conf = of_id->data; /* register shadow for context save and restore */ - bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * - bgp->conf->sensor_count, GFP_KERNEL); - if (!bgp->regval) { - dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); + bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count, + sizeof(*bgp->regval), GFP_KERNEL); + if (!bgp->regval) return ERR_PTR(-ENOMEM); - } i = 0; do { diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 7ac9bcdf1e61..61fe8d6fd24e 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -764,7 +764,7 @@ static int __init ehv_bc_init(void) ehv_bc_driver = alloc_tty_driver(count); if (!ehv_bc_driver) { ret = -ENOMEM; - goto error; + goto err_free_bcs; } ehv_bc_driver->driver_name = "ehv-bc"; @@ -778,24 +778,23 @@ static int __init ehv_bc_init(void) ret = tty_register_driver(ehv_bc_driver); if (ret) { pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret); - goto error; + goto err_put_tty_driver; } ret = platform_driver_register(&ehv_bc_tty_driver); if (ret) { pr_err("ehv-bc: could not register platform driver (ret=%i)\n", ret); - goto error; + goto err_deregister_tty_driver; } return 0; -error: - if (ehv_bc_driver) { - tty_unregister_driver(ehv_bc_driver); - put_tty_driver(ehv_bc_driver); - } - +err_deregister_tty_driver: + tty_unregister_driver(ehv_bc_driver); +err_put_tty_driver: + put_tty_driver(ehv_bc_driver); +err_free_bcs: kfree(bcs); return ret; diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 433de5ea9b02..f71b47334149 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev) } EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); +int serdev_device_write_buf(struct serdev_device *serdev, + const unsigned char *buf, size_t count) +{ + struct serdev_controller *ctrl = serdev->ctrl; + + if (!ctrl || !ctrl->ops->write_buf) + return -EINVAL; + + return ctrl->ops->write_buf(ctrl, buf, count); +} +EXPORT_SYMBOL_GPL(serdev_device_write_buf); + int serdev_device_write(struct serdev_device *serdev, const unsigned char *buf, size_t count, unsigned long timeout) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 487c88f6aa0e..d0a021c93986 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ 
-102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl) return PTR_ERR(tty); serport->tty = tty; - serport->port->client_ops = &client_ops; - serport->port->client_data = ctrl; - if (tty->ops->open) tty->ops->open(serport->tty, NULL); else @@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx) { + const struct tty_port_client_operations *old_ops; struct serdev_controller *ctrl; struct serport *serport; int ret; @@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port, ctrl->ops = &ctrl_ops; + old_ops = port->client_ops; + port->client_ops = &client_ops; + port->client_data = ctrl; + ret = serdev_controller_add(ctrl); if (ret) - goto err_controller_put; + goto err_reset_data; dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx); return &ctrl->dev; -err_controller_put: +err_reset_data: + port->client_data = NULL; + port->client_ops = old_ops; serdev_controller_put(ctrl); + return ERR_PTR(ret); } -void serdev_tty_port_unregister(struct tty_port *port) +int serdev_tty_port_unregister(struct tty_port *port) { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); if (!serport) - return; + return -ENODEV; serdev_controller_remove(ctrl); port->client_ops = NULL; port->client_data = NULL; serdev_controller_put(ctrl); + + return 0; } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 09a65a3ec7f7..68fd045a7025 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -47,6 +47,7 @@ /* * These are definitions for the Exar XR17V35X and XR17(C|D)15X */ +#define UART_EXAR_INT0 0x80 #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ #define UART_EXAR_DVID 0x8d /* Device identification */ @@ -1337,7 +1338,7 @@ out_lock: /* * Check if the device is a Fintek F81216A */ - if (port->type == PORT_16550A) + if (port->type == PORT_16550A && port->iotype == UPIO_PORT) fintek_8250_probe(up); if (up->capabilities != old_capabilities) { @@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port) static int exar_handle_irq(struct uart_port *port) { unsigned int iir = serial_port_in(port, UART_IIR); - int ret; + int ret = 0; - ret = serial8250_handle_irq(port, iir); + if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) && + serial_port_in(port, UART_EXAR_INT0) != 0) + ret = 1; - if ((port->type == PORT_XR17V35X) || - (port->type == PORT_XR17D15X)) { - serial_port_in(port, 0x80); - serial_port_in(port, 0x81); - serial_port_in(port, 0x82); - serial_port_in(port, 0x83); - } + ret |= serial8250_handle_irq(port, iir); return ret; } @@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port) serial_port_in(port, UART_RX); serial_port_in(port, UART_IIR); serial_port_in(port, UART_MSR); + if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) + serial_port_in(port, UART_EXAR_INT0); /* * At this point, there's no way the LSR could still be 0xff; @@ -2335,6 +2334,8 @@ dont_test_tx_en: serial_port_in(port, UART_RX); serial_port_in(port, UART_IIR); serial_port_in(port, UART_MSR); + if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) + serial_port_in(port, UART_EXAR_INT0); up->lsr_saved_flags = 0; up->msr_saved_flags = 0; diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index 18e3f8342b85..0475f5d261ce 100644 --- 
a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c @@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev) port = &altera_jtaguart_ports[i].port; uart_remove_one_port(&altera_jtaguart_driver, port); + iounmap(port->membase); return 0; } diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 46d3438a0d27..3e4b717670d7 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev) if (port) { uart_remove_one_port(&altera_uart_driver, port); port->mapbase = 0; + iounmap(port->membase); } return 0; diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c index ebd8569f9ad5..9fff25be87f9 100644 --- a/drivers/tty/serial/efm32-uart.c +++ b/drivers/tty/serial/efm32-uart.c @@ -27,6 +27,7 @@ #define UARTn_FRAME 0x04 #define UARTn_FRAME_DATABITS__MASK 0x000f #define UARTn_FRAME_DATABITS(n) ((n) - 3) +#define UARTn_FRAME_PARITY__MASK 0x0300 #define UARTn_FRAME_PARITY_NONE 0x0000 #define UARTn_FRAME_PARITY_EVEN 0x0200 #define UARTn_FRAME_PARITY_ODD 0x0300 @@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port, 16 * (4 + (clkdiv >> 6))); frame = efm32_uart_read32(efm_port, UARTn_FRAME); - if (frame & UARTn_FRAME_PARITY_ODD) + switch (frame & UARTn_FRAME_PARITY__MASK) { + case UARTn_FRAME_PARITY_ODD: *parity = 'o'; - else if (frame & UARTn_FRAME_PARITY_EVEN) + break; + case UARTn_FRAME_PARITY_EVEN: *parity = 'e'; - else + break; + default: *parity = 'n'; + } *bits = (frame & UARTn_FRAME_DATABITS__MASK) - UARTn_FRAME_DATABITS(4) + 4; diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 157883653256..f190a84a0246 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = { static void __exit ifx_spi_exit(void) { /* unregister */ + spi_unregister_driver(&ifx_spi_driver); tty_unregister_driver(tty_drv); put_tty_driver(tty_drv); - spi_unregister_driver(&ifx_spi_driver); unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 33509b4beaec..bbefddd92bfe 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev) * and DCD (when they are outputs) or enables the respective * irqs. So set this bit early, i.e. before requesting irqs. */ - writel(UFCR_DCEDTE, sport->port.membase + UFCR); + reg = readl(sport->port.membase + UFCR); + if (!(reg & UFCR_DCEDTE)) + writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR); /* * Disable UCR3_RI and UCR3_DCD irqs. 
They are also not @@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev) sport->port.membase + UCR3); } else { - writel(0, sport->port.membase + UFCR); + unsigned long ucr3 = UCR3_DSR; + + reg = readl(sport->port.membase + UFCR); + if (reg & UFCR_DCEDTE) + writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR); + + if (!is_imx1_uart(sport)) + ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP; + writel(ucr3, sport->port.membase + UCR3); } clk_disable_unprepare(sport->clk_ipg); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 0f45b7884a2c..13bfd5dcffce 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) mutex_lock(&port->mutex); tty_dev = device_find_child(uport->dev, &match, serial_match_port); - if (device_may_wakeup(tty_dev)) { + if (tty_dev && device_may_wakeup(tty_dev)) { if (!enable_irq_wake(uport->irq)) uport->irq_wake = 1; put_device(tty_dev); @@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) * Register the port whether it's detected or not. This allows * setserial to be used to alter this port's parameters. */ - tty_dev = tty_port_register_device_attr(port, drv->tty_driver, + tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver, uport->line, uport->dev, port, uport->tty_groups); if (likely(!IS_ERR(tty_dev))) { device_set_wakeup_capable(tty_dev, 1); @@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport) /* * Remove the devices from the tty layer */ - tty_unregister_device(drv->tty_driver, uport->line); + tty_port_unregister_device(port, drv->tty_driver, uport->line); tty = tty_port_tty_get(port); if (tty) { diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 1d21a9c1d33e..4fb3165384c4 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -34,7 +34,9 @@ static int tty_port_default_receive_buf(struct tty_port *port, if (!disc) return 0; + mutex_lock(&tty->atomic_write_lock); ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); + mutex_unlock(&tty->atomic_write_lock); tty_ldisc_deref(disc); @@ -129,19 +131,85 @@ struct device *tty_port_register_device_attr(struct tty_port *port, struct device *device, void *drvdata, const struct attribute_group **attr_grp) { + tty_port_link_device(port, driver, index); + return tty_register_device_attr(driver, index, device, drvdata, + attr_grp); +} +EXPORT_SYMBOL_GPL(tty_port_register_device_attr); + +/** + * tty_port_register_device_attr_serdev - register tty or serdev device + * @port: tty_port of the device + * @driver: tty_driver for this device + * @index: index of the tty + * @device: parent if exists, otherwise NULL + * @drvdata: driver data for the device + * @attr_grp: attribute group for the device + * + * Register a serdev or tty device depending on if the parent device has any + * defined serdev clients or not. 
+ */ +struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp) +{ struct device *dev; tty_port_link_device(port, driver, index); dev = serdev_tty_port_register(port, device, driver, index); - if (PTR_ERR(dev) != -ENODEV) + if (PTR_ERR(dev) != -ENODEV) { /* Skip creating cdev if we registered a serdev device */ return dev; + } return tty_register_device_attr(driver, index, device, drvdata, attr_grp); } -EXPORT_SYMBOL_GPL(tty_port_register_device_attr); +EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev); + +/** + * tty_port_register_device_serdev - register tty or serdev device + * @port: tty_port of the device + * @driver: tty_driver for this device + * @index: index of the tty + * @device: parent if exists, otherwise NULL + * + * Register a serdev or tty device depending on if the parent device has any + * defined serdev clients or not. + */ +struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device) +{ + return tty_port_register_device_attr_serdev(port, driver, index, + device, NULL, NULL); +} +EXPORT_SYMBOL_GPL(tty_port_register_device_serdev); + +/** + * tty_port_unregister_device - deregister a tty or serdev device + * @port: tty_port of the device + * @driver: tty_driver for this device + * @index: index of the tty + * + * If a tty or serdev device is registered with a call to + * tty_port_register_device_serdev() then this function must be called when + * the device is gone. + */ +void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index) +{ + int ret; + + ret = serdev_tty_port_unregister(port); + if (ret == 0) + return; + + tty_unregister_device(driver, index); +} +EXPORT_SYMBOL_GPL(tty_port_unregister_device); int tty_port_alloc_xmit_buf(struct tty_port *port) { @@ -189,9 +257,6 @@ static void tty_port_destructor(struct kref *kref) /* check if last port ref was dropped before tty release */ if (WARN_ON(port->itty)) return; - - serdev_tty_port_unregister(port); - if (port->xmit_buf) free_page((unsigned long)port->xmit_buf); tty_port_destroy(port); diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 9cd8722f24f6..a3ffe97170ff 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = { { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, { .compatible = "snps,dwc2" }, { .compatible = "samsung,s3c6400-hsotg" }, + { .compatible = "amlogic,meson8-usb", + .data = dwc2_set_amlogic_params }, { .compatible = "amlogic,meson8b-usb", .data = dwc2_set_amlogic_params }, { .compatible = "amlogic,meson-gxbb-usb", diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 4c8aacc232c0..74d57d6994da 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) /* Caller must hold fsg->lock */ static void wakeup_thread(struct fsg_common *common) { - smp_wmb(); /* ensure the write of bh->state is complete */ + /* + * Ensure the reading of thread_wakeup_needed + * and the writing of bh->state are completed + */ + smp_mb(); /* Tell the main thread that something has happened */ common->thread_wakeup_needed = 1; if (common->thread_task) @@ 
-627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze) } __set_current_state(TASK_RUNNING); common->thread_wakeup_needed = 0; - smp_rmb(); /* ensure the latest bh->state is visible */ + + /* + * Ensure the writing of thread_wakeup_needed + * and the reading of bh->state are completed + */ + smp_mb(); return rc; } diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 5a2d845fb1a6..cd4c88529721 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -623,7 +623,6 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3) { usb3_disconnect(usb3); usb3_write(usb3, 0, USB3_P0_INT_ENA); - usb3_write(usb3, 0, USB3_PN_INT_ENA); usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA); usb3_write(usb3, 0, USB3_USB_INT_ENA_1); usb3_write(usb3, 0, USB3_USB_INT_ENA_2); @@ -1475,7 +1474,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3, struct renesas_usb3_request *usb3_req, int status) { - usb3_pn_stop(usb3); + unsigned long flags; + + spin_lock_irqsave(&usb3->lock, flags); + if (usb3_pn_change(usb3, usb3_ep->num)) + usb3_pn_stop(usb3); + spin_unlock_irqrestore(&usb3->lock, flags); + usb3_disable_pipe_irq(usb3, usb3_ep->num); usb3_request_done(usb3_ep, usb3_req, status); @@ -1504,30 +1509,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num) { struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); + bool done = false; if (!usb3_req) return; + spin_lock(&usb3->lock); + if (usb3_pn_change(usb3, num)) + goto out; + if (usb3_ep->dir_in) { /* Do not stop the IN pipe here to detect LSTTR interrupt */ if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); } else { if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) - usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); + done = true; } + +out: + /* need to unlock because usb3_request_done_pipen() locks it */ + spin_unlock(&usb3->lock); + + if (done) + usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); } static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) { u32 pn_int_sta; - if (usb3_pn_change(usb3, num) < 0) + spin_lock(&usb3->lock); + if (usb3_pn_change(usb3, num) < 0) { + spin_unlock(&usb3->lock); return; + } pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); + spin_unlock(&usb3->lock); if (pn_int_sta & PN_INT_LSTTR) usb3_irq_epc_pipen_lsttr(usb3, num); if (pn_int_sta & PN_INT_BFRDY) @@ -1660,6 +1681,7 @@ static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep) spin_lock_irqsave(&usb3->lock, flags); if (!usb3_pn_change(usb3, usb3_ep->num)) { + usb3_write(usb3, 0, USB3_PN_INT_ENA); usb3_write(usb3, 0, USB3_PN_RAMMAP); usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); } @@ -1799,6 +1821,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget, /* hook up the driver */ usb3->driver = driver; + pm_runtime_enable(usb3_to_dev(usb3)); + pm_runtime_get_sync(usb3_to_dev(usb3)); + renesas_usb3_init_controller(usb3); return 0; @@ -1807,14 +1832,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget, static int renesas_usb3_stop(struct usb_gadget *gadget) { struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); - unsigned long flags; - spin_lock_irqsave(&usb3->lock, flags); usb3->softconnect = false; usb3->gadget.speed = USB_SPEED_UNKNOWN; usb3->driver = NULL; 
renesas_usb3_stop_controller(usb3); - spin_unlock_irqrestore(&usb3->lock, flags); + + pm_runtime_put(usb3_to_dev(usb3)); + pm_runtime_disable(usb3_to_dev(usb3)); return 0; } @@ -1891,9 +1916,6 @@ static int renesas_usb3_remove(struct platform_device *pdev) device_remove_file(&pdev->dev, &dev_attr_role); - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); - usb_del_gadget_udc(&usb3->gadget); __renesas_usb3_ep_free_request(usb3->ep0_req); @@ -2099,9 +2121,6 @@ static int renesas_usb3_probe(struct platform_device *pdev) usb3->workaround_for_vbus = priv->workaround_for_vbus; - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - dev_info(&pdev->dev, "probed\n"); return 0;
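A note on the sd_sync_cache() rework in the sd.c hunks above: it follows a common shape for optional out-parameters. A caller that does not care about sense data passes NULL, the function redirects the pointer at a local scratch variable so the body can use it unconditionally, and a caller that does care (the suspend path, which wants to treat ILLEGAL_REQUEST from drives that cannot sync as a non-error) reads the result back. A minimal userspace sketch of that shape follows; the types and names (struct sense, sync_cache, the key value) are invented stand-ins, not the SCSI layer's API.

    #include <stdio.h>

    struct sense { int key; };

    #define ILLEGAL_REQUEST 5            /* assumed stand-in sense key */

    /* 'sense' may be NULL when the caller has no use for the details. */
    static int sync_cache(struct sense *sense)
    {
        struct sense local;

        if (!sense)
            sense = &local;              /* caller not interested: use scratch */

        sense->key = ILLEGAL_REQUEST;    /* pretend the device rejected the command */
        return -1;                       /* and that the command failed */
    }

    int main(void)
    {
        struct sense s;
        int ret = sync_cache(&s);        /* suspend-style caller inspects sense */

        if (ret && s.key == ILLEGAL_REQUEST)
            ret = 0;                     /* drive can't sync: don't fail suspend */

        sync_cache(NULL);                /* shutdown-style caller ignores sense */
        printf("ret=%d\n", ret);
        return ret;
    }

The point of the local fallback is that the error-evaluation code inside the function keeps working for every caller instead of growing NULL checks at each use.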
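The sg.c change above fixes a no-match bug typical of the kernel's list_for_each_entry(): when that loop runs to completion, the cursor is left pointing at a head-embedded non-entry rather than NULL, so the old fall-through "return resp" handed back garbage whenever no request matched. Returning from inside the match arm, after dropping the lock, and returning NULL past the loop makes the two outcomes explicit. Below is a userspace re-creation of the fixed shape with a plain pointer list and invented names; a plain list naturally yields NULL at the end, so the explicit returns here simply mirror the structure the patch adopts.

    #include <pthread.h>
    #include <stdio.h>

    struct req {
        int pack_id;
        int done;
        struct req *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct req *req_list;

    /* Claim the first completed request matching pack_id (any, if -1). */
    static struct req *get_rq_mark(int pack_id)
    {
        struct req *resp;

        pthread_mutex_lock(&list_lock);
        for (resp = req_list; resp; resp = resp->next) {
            if (resp->done == 1 &&
                (pack_id == -1 || resp->pack_id == pack_id)) {
                resp->done = 2;          /* guard against other readers */
                pthread_mutex_unlock(&list_lock);
                return resp;             /* match: return explicitly */
            }
        }
        pthread_mutex_unlock(&list_lock);
        return NULL;                     /* no match: never the loop cursor */
    }

    int main(void)
    {
        struct req a = { .pack_id = 7, .done = 1, .next = NULL };

        req_list = &a;
        printf("hit=%p\n", (void *)get_rq_mark(7));
        printf("miss=%p\n", (void *)get_rq_mark(9));  /* prints (nil) */
        return 0;
    }

Marking the entry claimed before dropping the lock mirrors the patch's "guard against other readers" step: no second reader can win the same request between the match and the return.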
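The ehv_bytechan rework above is the standard staged-unwind idiom: the old single "error" label could call tty_unregister_driver() for a driver that was never registered, while one label per teardown step undoes exactly what has succeeded, in reverse order of acquisition. A reduced userspace sketch of the idiom, with placeholder resources rather than the tty API:

    #include <stdio.h>
    #include <stdlib.h>

    static void *acquire(const char *what)
    {
        printf("acquire %s\n", what);
        return malloc(1);
    }

    static void release(const char *what, void *p)
    {
        printf("release %s\n", what);
        free(p);
    }

    static int register_step(void)
    {
        return -1;                      /* pretend the final step fails */
    }

    static int init(void)
    {
        void *bcs, *drv;
        int ret;

        bcs = acquire("bcs");
        if (!bcs)
            return -1;

        drv = acquire("driver");
        if (!drv) {
            ret = -1;
            goto err_free_bcs;          /* only bcs exists so far */
        }

        ret = register_step();
        if (ret)
            goto err_put_driver;        /* unwind driver, then bcs */
        return 0;

    err_put_driver:
        release("driver", drv);
    err_free_bcs:
        release("bcs", bcs);
        return ret;
    }

    int main(void)
    {
        return init() ? 1 : 0;
    }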
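The f_mass_storage hunks above replace smp_wmb()/smp_rmb() with full smp_mb() barriers because each side of the wake/sleep handshake has a store that must stay ordered before a load: the sleeper clears thread_wakeup_needed and then reads bh->state, and the waker must likewise serialize its view of the flag against the state it publishes. A write barrier orders only stores against stores and a read barrier only loads against loads, so neither stops the store-load reordering that can lose a wakeup. Here is a minimal userspace sketch of the pairing using C11 fences; waker, sleeper, state and wakeup_needed are illustrative names, not the driver's symbols.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int state;           /* stands in for bh->state            */
    static atomic_int wakeup_needed;   /* stands in for thread_wakeup_needed */

    static void *waker(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&state, 1, memory_order_relaxed);
        /* Full barrier: the state store must be globally ordered before
         * the flag store and against any later load on this side. */
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&wakeup_needed, 1, memory_order_relaxed);
        return NULL;
    }

    static void *sleeper(void *arg)
    {
        (void)arg;
        while (!atomic_load_explicit(&wakeup_needed, memory_order_relaxed))
            ;                          /* spin instead of schedule() */
        atomic_store_explicit(&wakeup_needed, 0, memory_order_relaxed);
        /* Full barrier: clearing the flag (a store) must not be reordered
         * after the state load below; a read barrier alone cannot do this. */
        atomic_thread_fence(memory_order_seq_cst);
        printf("state=%d\n", atomic_load_explicit(&state, memory_order_relaxed));
        return NULL;
    }

    int main(void)
    {
        pthread_t w, s;

        pthread_create(&s, NULL, sleeper, NULL);
        pthread_create(&w, NULL, waker, NULL);
        pthread_join(w, NULL);
        pthread_join(s, NULL);
        return 0;
    }

With the fences weakened to wmb/rmb strength, the sleeper's flag-clear can pass its state read, which is exactly the window the patch closes.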