Diffstat (limited to 'drivers')
869 files changed, 13906 insertions, 7890 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index c95df0b8c880..5d9248526d78 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -235,17 +235,6 @@ config ACPI_INITRD_TABLE_OVERRIDE initrd, therefore it's safe to say Y. See Documentation/acpi/initrd_table_override.txt for details -config ACPI_BLACKLIST_YEAR - int "Disable ACPI for systems before Jan 1st this year" if X86_32 - default 0 - help - Enter a 4-digit year, e.g., 2001, to disable ACPI by default - on platforms with DMI BIOS date before January 1st that year. - "acpi=force" can be used to override this mechanism. - - Enter 0 to disable this mechanism and allow ACPI to - run by default no matter what the year. (default) - config ACPI_DEBUG bool "Debug Statements" default n diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index b9f0d5f4bba5..8711e3797165 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -56,7 +56,6 @@ static int ac_sleep_before_get_state_ms; struct acpi_ac { struct power_supply charger; - struct acpi_device *adev; struct platform_device *pdev; unsigned long long state; }; @@ -70,8 +69,9 @@ struct acpi_ac { static int acpi_ac_get_state(struct acpi_ac *ac) { acpi_status status; + acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); - status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL, + status = acpi_evaluate_integer(handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, @@ -119,6 +119,7 @@ static enum power_supply_property ac_props[] = { static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) { struct acpi_ac *ac = data; + struct acpi_device *adev; if (!ac) return; @@ -141,10 +142,11 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - acpi_bus_generate_netlink_event(ac->adev->pnp.device_class, + adev = ACPI_COMPANION(&ac->pdev->dev); + acpi_bus_generate_netlink_event(adev->pnp.device_class, dev_name(&ac->pdev->dev), event, (u32) ac->state); - acpi_notifier_call_chain(ac->adev, event, (u32) ac->state); + acpi_notifier_call_chain(adev, event, (u32) ac->state); kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); } @@ -178,8 +180,8 @@ static int acpi_ac_probe(struct platform_device *pdev) if (!pdev) return -EINVAL; - result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); - if (result) + adev = ACPI_COMPANION(&pdev->dev); + if (!adev) return -ENODEV; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); @@ -188,7 +190,6 @@ static int acpi_ac_probe(struct platform_device *pdev) strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); strcpy(acpi_device_class(adev), ACPI_AC_CLASS); - ac->adev = adev; ac->pdev = pdev; platform_set_drvdata(pdev, ac); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index d3961014aad7..6745fe137b9e 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -163,6 +163,15 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { { "80860F41", (unsigned long)&byt_i2c_dev_desc }, { "INT33B2", }, + { "INT3430", (unsigned long)&lpt_dev_desc }, + { "INT3431", (unsigned long)&lpt_dev_desc }, + { "INT3432", (unsigned long)&lpt_dev_desc }, + { "INT3433", (unsigned long)&lpt_dev_desc }, + { "INT3434", (unsigned long)&lpt_uart_dev_desc }, + { "INT3435", (unsigned long)&lpt_uart_dev_desc }, + { "INT3436", (unsigned long)&lpt_sdio_dev_desc }, + { "INT3437", }, + { } }; diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 8a4cfc7e71f0..dbfe49e5fd63 
100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -111,7 +111,7 @@ int acpi_create_platform_device(struct acpi_device *adev, pdevinfo.id = -1; pdevinfo.res = resources; pdevinfo.num_res = count; - pdevinfo.acpi_node.handle = adev->handle; + pdevinfo.acpi_node.companion = adev; pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) { dev_err(&adev->dev, "platform device creation failed: %ld\n", diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index f691d0e4d9fa..ff97430455cb 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -184,7 +184,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, struct acpi_buffer *output_buffer); acpi_status -acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, +acpi_rs_create_aml_resources(struct acpi_buffer *resource_list, struct acpi_buffer *output_buffer); acpi_status @@ -227,8 +227,8 @@ acpi_rs_get_list_length(u8 * aml_buffer, u32 aml_buffer_length, acpi_size * size_needed); acpi_status -acpi_rs_get_aml_length(struct acpi_resource *linked_list_buffer, - acpi_size * size_needed); +acpi_rs_get_aml_length(struct acpi_resource *resource_list, + acpi_size resource_list_size, acpi_size * size_needed); acpi_status acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index 243737363fb8..fd1ff54cda19 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -106,6 +106,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name) void acpi_ns_delete_node(struct acpi_namespace_node *node) { union acpi_operand_object *obj_desc; + union acpi_operand_object *next_desc; ACPI_FUNCTION_NAME(ns_delete_node); @@ -114,12 +115,13 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node) acpi_ns_detach_object(node); /* - * Delete an attached data object if present (an object that was created - * and attached via acpi_attach_data). Note: After any normal object is - * detached above, the only possible remaining object is a data object. + * Delete an attached data object list if present (objects that were + * attached via acpi_attach_data). Note: After any normal object is + * detached above, the only possible remaining object(s) are data + * objects, in a linked list. 
*/ obj_desc = node->object; - if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) { + while (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) { /* Invoke the attached data deletion handler if present */ @@ -127,7 +129,15 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node) obj_desc->data.handler(node, obj_desc->data.pointer); } + next_desc = obj_desc->common.next_object; acpi_ut_remove_reference(obj_desc); + obj_desc = next_desc; + } + + /* Special case for the statically allocated root node */ + + if (node == acpi_gbl_root_node) { + return; } /* Now we can delete the node */ diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index cc2fea94c5f0..4a0665b6bcc1 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -593,24 +593,26 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle) void acpi_ns_terminate(void) { - union acpi_operand_object *obj_desc; + acpi_status status; ACPI_FUNCTION_TRACE(ns_terminate); /* - * 1) Free the entire namespace -- all nodes and objects - * - * Delete all object descriptors attached to namepsace nodes + * Free the entire namespace -- all nodes and all objects + * attached to the nodes */ acpi_ns_delete_namespace_subtree(acpi_gbl_root_node); - /* Detach any objects attached to the root */ + /* Delete any objects attached to the root node */ - obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node); - if (obj_desc) { - acpi_ns_detach_object(acpi_gbl_root_node); + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_VOID; } + acpi_ns_delete_node(acpi_gbl_root_node); + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n")); return_VOID; } diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index b62a0f4f4f9b..b60c9cf82862 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -174,6 +174,7 @@ acpi_rs_stream_option_length(u32 resource_length, * FUNCTION: acpi_rs_get_aml_length * * PARAMETERS: resource - Pointer to the resource linked list + * resource_list_size - Size of the resource linked list * size_needed - Where the required size is returned * * RETURN: Status @@ -185,16 +186,20 @@ acpi_rs_stream_option_length(u32 resource_length, ******************************************************************************/ acpi_status -acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed) +acpi_rs_get_aml_length(struct acpi_resource *resource, + acpi_size resource_list_size, acpi_size * size_needed) { acpi_size aml_size_needed = 0; + struct acpi_resource *resource_end; acpi_rs_length total_size; ACPI_FUNCTION_TRACE(rs_get_aml_length); /* Traverse entire list of internal resource descriptors */ - while (resource) { + resource_end = + ACPI_ADD_PTR(struct acpi_resource, resource, resource_list_size); + while (resource < resource_end) { /* Validate the descriptor type */ diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index 65f3e1c5b598..3a2ace93e62c 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c @@ -418,22 +418,21 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, * * FUNCTION: acpi_rs_create_aml_resources * - * PARAMETERS: linked_list_buffer - Pointer to the resource linked list - * output_buffer - Pointer to the user's buffer + * PARAMETERS: resource_list - Pointer to the resource list buffer + * output_buffer - Where the 
AML buffer is returned * * RETURN: Status AE_OK if okay, else a valid acpi_status code. * If the output_buffer is too small, the error will be * AE_BUFFER_OVERFLOW and output_buffer->Length will point * to the size buffer needed. * - * DESCRIPTION: Takes the linked list of device resources and - * creates a bytestream to be used as input for the - * _SRS control method. + * DESCRIPTION: Converts a list of device resources to an AML bytestream + * to be used as input for the _SRS control method. * ******************************************************************************/ acpi_status -acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, +acpi_rs_create_aml_resources(struct acpi_buffer *resource_list, struct acpi_buffer *output_buffer) { acpi_status status; @@ -441,16 +440,16 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, ACPI_FUNCTION_TRACE(rs_create_aml_resources); - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n", - linked_list_buffer)); + /* Params already validated, no need to re-validate here */ - /* - * Params already validated, so we don't re-validate here - * - * Pass the linked_list_buffer into a module that calculates - * the buffer size needed for the byte stream. - */ - status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ResourceList Buffer = %p\n", + resource_list->pointer)); + + /* Get the buffer size needed for the AML byte stream */ + + status = acpi_rs_get_aml_length(resource_list->pointer, + resource_list->length, + &aml_size_needed); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", (u32)aml_size_needed, acpi_format_exception(status))); @@ -467,10 +466,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, /* Do the conversion */ - status = - acpi_rs_convert_resources_to_aml(linked_list_buffer, - aml_size_needed, - output_buffer->pointer); + status = acpi_rs_convert_resources_to_aml(resource_list->pointer, + aml_size_needed, + output_buffer->pointer); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index aef303d56d86..14a7982c9961 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c @@ -753,7 +753,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, * Convert the linked list into a byte stream */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer); + status = acpi_rs_create_aml_resources(in_buffer, &buffer); if (ACPI_FAILURE(status)) { goto cleanup; } diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 1a67b3944b3b..03ae8affe48f 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -185,6 +185,7 @@ acpi_debug_print(u32 requested_debug_level, } acpi_gbl_prev_thread_id = thread_id; + acpi_gbl_nesting_level = 0; } /* @@ -193,13 +194,21 @@ acpi_debug_print(u32 requested_debug_level, */ acpi_os_printf("%9s-%04ld ", module_name, line_number); +#ifdef ACPI_EXEC_APP + /* + * For acpi_exec only, emit the thread ID and nesting level. + * Note: nesting level is really only useful during a single-thread + * execution. Otherwise, multiple threads will keep resetting the + * level. 
+ */ if (ACPI_LV_THREADS & acpi_dbg_level) { acpi_os_printf("[%u] ", (u32)thread_id); } - acpi_os_printf("[%02ld] %-22.22s: ", - acpi_gbl_nesting_level, - acpi_ut_trim_function_name(function_name)); + acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level); +#endif + + acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name)); va_start(args, format); acpi_os_vprintf(format, args); @@ -420,7 +429,9 @@ acpi_ut_exit(u32 line_number, component_id, "%s\n", acpi_gbl_fn_exit_str); } - acpi_gbl_nesting_level--; + if (acpi_gbl_nesting_level) { + acpi_gbl_nesting_level--; + } } ACPI_EXPORT_SYMBOL(acpi_ut_exit) @@ -467,7 +478,9 @@ acpi_ut_status_exit(u32 line_number, } } - acpi_gbl_nesting_level--; + if (acpi_gbl_nesting_level) { + acpi_gbl_nesting_level--; + } } ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) @@ -504,7 +517,9 @@ acpi_ut_value_exit(u32 line_number, ACPI_FORMAT_UINT64(value)); } - acpi_gbl_nesting_level--; + if (acpi_gbl_nesting_level) { + acpi_gbl_nesting_level--; + } } ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) @@ -540,7 +555,9 @@ acpi_ut_ptr_exit(u32 line_number, ptr); } - acpi_gbl_nesting_level--; + if (acpi_gbl_nesting_level) { + acpi_gbl_nesting_level--; + } } #endif diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index fb848378d582..078c4f7fe2dd 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -75,39 +75,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = { {""} }; -#if CONFIG_ACPI_BLACKLIST_YEAR - -static int __init blacklist_by_year(void) -{ - int year; - - /* Doesn't exist? Likely an old system */ - if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) { - printk(KERN_ERR PREFIX "no DMI BIOS year, " - "acpi=force is required to enable ACPI\n" ); - return 1; - } - /* 0? Likely a buggy new BIOS */ - if (year == 0) { - printk(KERN_ERR PREFIX "DMI BIOS year==0, " - "assuming ACPI-capable machine\n" ); - return 0; - } - if (year < CONFIG_ACPI_BLACKLIST_YEAR) { - printk(KERN_ERR PREFIX "BIOS age (%d) fails cutoff (%d), " - "acpi=force is required to enable ACPI\n", - year, CONFIG_ACPI_BLACKLIST_YEAR); - return 1; - } - return 0; -} -#else -static inline int blacklist_by_year(void) -{ - return 0; -} -#endif - int __init acpi_blacklisted(void) { int i = 0; @@ -166,8 +133,6 @@ int __init acpi_blacklisted(void) } } - blacklisted += blacklist_by_year(); - dmi_check_system(acpi_osi_dmi_table); return blacklisted; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index d42b2fb5a7e9..b3480cf7db1a 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -22,16 +22,12 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include <linux/device.h> +#include <linux/acpi.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> -#include <acpi/acpi_drivers.h> - #include "internal.h" #define _COMPONENT ACPI_POWER_COMPONENT @@ -548,7 +544,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev, */ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) { - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; int ret, d_min, d_max; @@ -656,7 +652,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) if (!device_run_wake(phys_dev)) return -EINVAL; - handle = DEVICE_ACPI_HANDLE(phys_dev); + handle = ACPI_HANDLE(phys_dev); if (!handle || 
acpi_bus_get_device(handle, &adev)) { dev_dbg(phys_dev, "ACPI handle without context in %s!\n", __func__); @@ -700,7 +696,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) if (!device_can_wakeup(dev)) return -EINVAL; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle || acpi_bus_get_device(handle, &adev)) { dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); return -ENODEV; @@ -722,7 +718,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable) */ struct acpi_device *acpi_dev_pm_get_node(struct device *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d5309fd49458..ba5b56db9d27 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -173,9 +173,10 @@ static void start_transaction(struct acpi_ec *ec) static void advance_transaction(struct acpi_ec *ec, u8 status) { unsigned long flags; - struct transaction *t = ec->curr; + struct transaction *t; spin_lock_irqsave(&ec->lock, flags); + t = ec->curr; if (!t) goto unlock; if (t->wlen > t->wi) { diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index fdef416c0ff6..cae3b387b867 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -78,15 +78,17 @@ enum { #define ACPI_GENL_VERSION 0x01 #define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group" +static const struct genl_multicast_group acpi_event_mcgrps[] = { + { .name = ACPI_GENL_MCAST_GROUP_NAME, }, +}; + static struct genl_family acpi_event_genl_family = { .id = GENL_ID_GENERATE, .name = ACPI_GENL_FAMILY_NAME, .version = ACPI_GENL_VERSION, .maxattr = ACPI_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group acpi_event_mcgrp = { - .name = ACPI_GENL_MCAST_GROUP_NAME, + .mcgrps = acpi_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(acpi_event_mcgrps), }; int acpi_bus_generate_netlink_event(const char *device_class, @@ -141,7 +143,7 @@ int acpi_bus_generate_netlink_event(const char *device_class, return result; } - genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC); + genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC); return 0; } @@ -149,18 +151,7 @@ EXPORT_SYMBOL(acpi_bus_generate_netlink_event); static int acpi_event_genetlink_init(void) { - int result; - - result = genl_register_family(&acpi_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&acpi_event_genl_family, - &acpi_event_mcgrp); - if (result) - genl_unregister_family(&acpi_event_genl_family); - - return result; + return genl_register_family(&acpi_event_genl_family); } #else diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 10f0f40587bb..a22a295edb69 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -197,30 +197,28 @@ static void acpi_physnode_link_name(char *buf, unsigned int node_id) int acpi_bind_one(struct device *dev, acpi_handle handle) { - struct acpi_device *acpi_dev; - acpi_status status; + struct acpi_device *acpi_dev = NULL; struct acpi_device_physical_node *physical_node, *pn; char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - if (ACPI_HANDLE(dev)) { + if (ACPI_COMPANION(dev)) { if (handle) { - dev_warn(dev, "ACPI handle is already set\n"); + dev_warn(dev, "ACPI companion already set\n"); return -EINVAL; } else { - handle = ACPI_HANDLE(dev); + acpi_dev = ACPI_COMPANION(dev); } + } else { + 
acpi_bus_get_device(handle, &acpi_dev); } - if (!handle) + if (!acpi_dev) return -EINVAL; + get_device(&acpi_dev->dev); get_device(dev); - status = acpi_bus_get_device(handle, &acpi_dev); - if (ACPI_FAILURE(status)) - goto err; - physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { retval = -ENOMEM; @@ -242,10 +240,11 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) dev_warn(dev, "Already associated with ACPI node\n"); kfree(physical_node); - if (ACPI_HANDLE(dev) != handle) + if (ACPI_COMPANION(dev) != acpi_dev) goto err; put_device(dev); + put_device(&acpi_dev->dev); return 0; } if (pn->node_id == node_id) { @@ -259,8 +258,8 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; - if (!ACPI_HANDLE(dev)) - ACPI_HANDLE_SET(dev, acpi_dev->handle); + if (!ACPI_COMPANION(dev)) + ACPI_COMPANION_SET(dev, acpi_dev); acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, @@ -283,27 +282,21 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) return 0; err: - ACPI_HANDLE_SET(dev, NULL); + ACPI_COMPANION_SET(dev, NULL); put_device(dev); + put_device(&acpi_dev->dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); int acpi_unbind_one(struct device *dev) { + struct acpi_device *acpi_dev = ACPI_COMPANION(dev); struct acpi_device_physical_node *entry; - struct acpi_device *acpi_dev; - acpi_status status; - if (!ACPI_HANDLE(dev)) + if (!acpi_dev) return 0; - status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev); - if (ACPI_FAILURE(status)) { - dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__); - return -EINVAL; - } - mutex_lock(&acpi_dev->physical_node_lock); list_for_each_entry(entry, &acpi_dev->physical_node_list, node) @@ -316,9 +309,10 @@ int acpi_unbind_one(struct device *dev) acpi_physnode_link_name(physnode_name, entry->node_id); sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); sysfs_remove_link(&dev->kobj, "firmware_node"); - ACPI_HANDLE_SET(dev, NULL); - /* acpi_bind_one() increase refcnt by one. */ + ACPI_COMPANION_SET(dev, NULL); + /* Drop references taken by acpi_bind_one(). 
*/ put_device(dev); + put_device(&acpi_dev->dev); kfree(entry); break; } @@ -328,6 +322,15 @@ int acpi_unbind_one(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_unbind_one); +void acpi_preset_companion(struct device *dev, acpi_handle parent, u64 addr) +{ + struct acpi_device *adev; + + if (!acpi_bus_get_device(acpi_get_child(parent, addr), &adev)) + ACPI_COMPANION_SET(dev, adev); +} +EXPORT_SYMBOL_GPL(acpi_preset_companion); + static int acpi_platform_notify(struct device *dev) { struct acpi_bus_type *type = acpi_get_bus_type(dev); diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c index 266bc58ce0ce..386a9fe497b4 100644 --- a/drivers/acpi/nvs.c +++ b/drivers/acpi/nvs.c @@ -13,7 +13,6 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/acpi_io.h> -#include <acpi/acpiosxf.h> /* ACPI NVS regions, APEI may use it */ diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 56f05869b08d..20360e480bd8 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -65,6 +65,9 @@ static struct acpi_scan_handler pci_root_handler = { .ids = root_device_ids, .attach = acpi_pci_root_add, .detach = acpi_pci_root_remove, + .hotplug = { + .ignore = true, + }, }; static DEFINE_MUTEX(osc_lock); @@ -575,6 +578,7 @@ static int acpi_pci_root_add(struct acpi_device *device, dev_err(&device->dev, "Bus %04x:%02x not present in PCI namespace\n", root->segment, (unsigned int)root->secondary.start); + device->driver_data = NULL; result = -ENODEV; goto end; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 55f9dedbbf9f..fd39459926b1 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -289,24 +289,17 @@ void acpi_bus_device_eject(void *data, u32 ost_src) { struct acpi_device *device = data; acpi_handle handle = device->handle; - struct acpi_scan_handler *handler; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; int error; lock_device_hotplug(); mutex_lock(&acpi_scan_lock); - handler = device->handler; - if (!handler || !handler->hotplug.enabled) { - put_device(&device->dev); - goto err_support; - } - if (ost_src == ACPI_NOTIFY_EJECT_REQUEST) acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); - if (handler->hotplug.mode == AHM_CONTAINER) + if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER) kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); error = acpi_scan_hot_remove(device); @@ -411,8 +404,7 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n"); - status = acpi_bus_get_device(handle, &adev); - if (ACPI_FAILURE(status)) + if (acpi_bus_get_device(handle, &adev)) goto err_out; get_device(&adev->dev); @@ -1780,7 +1772,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) */ list_for_each_entry(hwid, &pnp.ids, list) { handler = acpi_scan_match_handler(hwid->id, NULL); - if (handler) { + if (handler && !handler->hotplug.ignore) { acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, acpi_hotplug_notify_cb, handler); break; @@ -1997,6 +1989,7 @@ static int acpi_bus_scan_fixed(void) if (result) return result; + device->flags.match_driver = true; result = device_attach(&device->dev); if (result < 0) return result; @@ -2013,6 +2006,7 @@ static int acpi_bus_scan_fixed(void) if (result) return result; + device->flags.match_driver = true; result = device_attach(&device->dev); } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 
14df30580e15..721e949e606e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -525,7 +525,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) * generate wakeup events. */ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { - acpi_event_status pwr_btn_status; + acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED; acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index db5293650f62..6dbc3ca45223 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -309,7 +309,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, sprintf(table_attr->name + ACPI_NAME_SIZE, "%d", table_attr->instance); - table_attr->attr.size = 0; + table_attr->attr.size = table_header->length; table_attr->attr.read = acpi_table_show; table_attr->attr.attr.name = table_attr->name; table_attr->attr.attr.mode = 0400; @@ -354,8 +354,9 @@ static int acpi_tables_sysfs_init(void) { struct acpi_table_attr *table_attr; struct acpi_table_header *table_header = NULL; - int table_index = 0; - int result; + int table_index; + acpi_status status; + int ret; tables_kobj = kobject_create_and_add("tables", acpi_kobj); if (!tables_kobj) @@ -365,33 +366,34 @@ static int acpi_tables_sysfs_init(void) if (!dynamic_tables_kobj) goto err_dynamic_tables; - do { - result = acpi_get_table_by_index(table_index, &table_header); - if (!result) { - table_index++; - table_attr = NULL; - table_attr = - kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); - if (!table_attr) - return -ENOMEM; - - acpi_table_attr_init(table_attr, table_header); - result = - sysfs_create_bin_file(tables_kobj, - &table_attr->attr); - if (result) { - kfree(table_attr); - return result; - } else - list_add_tail(&table_attr->node, - &acpi_table_attr_list); + for (table_index = 0;; table_index++) { + status = acpi_get_table_by_index(table_index, &table_header); + + if (status == AE_BAD_PARAMETER) + break; + + if (ACPI_FAILURE(status)) + continue; + + table_attr = NULL; + table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL); + if (!table_attr) + return -ENOMEM; + + acpi_table_attr_init(table_attr, table_header); + ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr); + if (ret) { + kfree(table_attr); + return ret; } - } while (!result); + list_add_tail(&table_attr->node, &acpi_table_attr_list); + } + kobject_uevent(tables_kobj, KOBJ_ADD); kobject_uevent(dynamic_tables_kobj, KOBJ_ADD); - result = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); + status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); - return result == AE_OK ? 0 : -EINVAL; + return ACPI_FAILURE(status) ? -EINVAL : 0; err_dynamic_tables: kobject_put(tables_kobj); err: diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 18dbdff4656e..995e91bcb97b 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -82,13 +82,6 @@ static bool allow_duplicates; module_param(allow_duplicates, bool, 0644); /* - * Some BIOSes claim they use minimum backlight at boot, - * and this may bring dimming screen after boot - */ -static bool use_bios_initial_backlight = 1; -module_param(use_bios_initial_backlight, bool, 0644); - -/* * For Windows 8 systems: if set ture and the GPU driver has * registered a backlight interface, skip registering ACPI video's. 
*/ @@ -406,12 +399,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } -static int video_ignore_initial_backlight(const struct dmi_system_id *d) -{ - use_bios_initial_backlight = 0; - return 0; -} - static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -456,54 +443,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Folio 13-2000", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "Fujitsu E753", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion dm4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion g6 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP 1000 Notebook PC", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), - }, - }, - { - .callback = video_ignore_initial_backlight, - .ident = "HP Pavilion m4", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"), - }, - }, {} }; @@ -839,20 +778,18 @@ acpi_video_init_brightness(struct acpi_video_device *device) if (!device->cap._BQC) goto set_level; - if (use_bios_initial_backlight) { - level = acpi_video_bqc_value_to_level(device, level_old); - /* - * On some buggy laptops, _BQC returns an uninitialized - * value when invoked for the first time, i.e. - * level_old is invalid (no matter whether it's a level - * or an index). Set the backlight to max_level in this case. - */ - for (i = 2; i < br->count; i++) - if (level == br->levels[i]) - break; - if (i == br->count || !level) - level = max_level; - } + level = acpi_video_bqc_value_to_level(device, level_old); + /* + * On some buggy laptops, _BQC returns an uninitialized + * value when invoked for the first time, i.e. + * level_old is invalid (no matter whether it's a level + * or an index). Set the backlight to max_level in this case. 
+ */ + for (i = 2; i < br->count; i++) + if (level == br->levels[i]) + break; + if (i == br->count || !level) + level = max_level; set_level: result = acpi_video_device_lcd_set_level(device, level); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index e2903d03180e..14f1e9506338 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -435,6 +435,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), + .driver_data = board_ahci_yes_fbs }, /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index f9554318504f..4b231baceb09 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -329,6 +329,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume); static const struct of_device_id ahci_of_match[] = { { .compatible = "snps,spear-ahci", }, { .compatible = "snps,exynos5440-ahci", }, + { .compatible = "ibm,476gtr-ahci", }, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index ab714d2ad978..4372cfa883c9 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -185,7 +185,7 @@ void ata_acpi_bind_port(struct ata_port *ap) if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle) return; - ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no)); + acpi_preset_companion(&ap->tdev, host_handle, ap->port_no); if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; @@ -222,7 +222,7 @@ void ata_acpi_bind_dev(struct ata_device *dev) parent_handle = port_handle; } - ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr)); + acpi_preset_companion(&dev->tdev, parent_handle, adr); register_hotplug_dock_device(ata_dev_acpi_handle(dev), &ata_acpi_dev_dock_ops, dev, NULL, NULL); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 81a94a3919db..75b93678bbcd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6304,10 +6304,9 @@ static void ata_port_detach(struct ata_port *ap) for (i = 0; i < SATA_PMP_MAX_PORTS; i++) ata_tlink_delete(&ap->pmp_link[i]); } - ata_tport_delete(ap); - /* remove the associated SCSI host */ scsi_remove_host(ap->scsi_host); + ata_tport_delete(ap); } /** diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index db6dfcfa3e2e..ab58556d347c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3625,6 +3625,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) shost->max_lun = 1; shost->max_channel = 1; shost->max_cmd_len = 16; + shost->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() * callback and it needs to see every deferred qc. 
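A recurring change in the ACPI and ATA hunks above is the move away from raw handles (DEVICE_ACPI_HANDLE()/ACPI_HANDLE() followed by acpi_bus_get_device()) toward the struct acpi_device companion pointer: ACPI_COMPANION(), ACPI_COMPANION_SET(), and the new acpi_preset_companion() helper that libata-acpi.c now calls. Below is a minimal, hedged sketch of the probe-side pattern only; the driver, _HID and function names (example_probe, "TEST0001") are hypothetical and not taken from the patch.

/* Hypothetical platform driver; only the companion-accessor pattern mirrors the series. */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* The ACPI core attaches the companion device before probe runs. */
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	if (!adev)
		return -ENODEV;

	dev_info(&pdev->dev, "bound to ACPI node %s\n", dev_name(&adev->dev));
	return 0;
}

static const struct acpi_device_id example_acpi_ids[] = {
	{ "TEST0001", 0 },	/* hypothetical _HID for illustration */
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_ids);

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-acpi-companion",
		.owner = THIS_MODULE,
		.acpi_match_table = example_acpi_ids,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");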
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index 68f9e3293e9c..88949c6d55dd 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -88,15 +88,13 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) static bool odd_can_poweroff(struct ata_device *ata_dev) { acpi_handle handle; - acpi_status status; struct acpi_device *acpi_dev; handle = ata_dev_acpi_handle(ata_dev); if (!handle) return false; - status = acpi_bus_get_device(handle, &acpi_dev); - if (ACPI_FAILURE(status)) + if (acpi_bus_get_device(handle, &acpi_dev)) return false; return acpi_device_can_poweroff(acpi_dev); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 853f610af28f..73492dd4a4bc 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -319,6 +319,7 @@ static int cf_init(struct arasan_cf_dev *acdev) ret = clk_set_rate(acdev->clk, 166000000); if (ret) { dev_warn(acdev->host->dev, "clock set rate failed"); + clk_disable_unprepare(acdev->clk); return ret; } @@ -396,8 +397,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len) struct dma_async_tx_descriptor *tx; struct dma_chan *chan = acdev->dma_chan; dma_cookie_t cookie; - unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + unsigned long flags = DMA_PREP_INTERRUPT; int ret = 0; tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 272f00927761..1bdf104e90bb 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev) tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */ if (tmp) { memcpy(card->atmdev->esi, tmp->dev_addr, 6); - + dev_put(tmp); printk("%s: ESI %pM\n", card->name, card->atmdev->esi); } /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 47051cd25113..3a94b799f166 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full( goto err_alloc; pdev->dev.parent = pdevinfo->parent; - ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); + ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion); if (pdevinfo->dma_mask) { /* @@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full( ret = platform_device_add(pdev); if (ret) { err: - ACPI_HANDLE_SET(&pdev->dev, NULL); + ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); err_alloc: diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c12e9b9556be..1b41fca3d65a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1350,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state) device_unlock(dev); + if (error) + pm_runtime_put(dev); + return error; } diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 98745dd77e8c..81f977510775 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context, BUG_ON(reg_size != 4); - if (ctx->clk) { + if (!IS_ERR(ctx->clk)) { ret = clk_enable(ctx->clk); if (ret < 0) return ret; @@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context, offset += ctx->val_bytes; } - if (ctx->clk) + if (!IS_ERR(ctx->clk)) clk_disable(ctx->clk); return 0; @@ -96,7 +96,7 @@ static int regmap_mmio_read(void 
*context, BUG_ON(reg_size != 4); - if (ctx->clk) { + if (!IS_ERR(ctx->clk)) { ret = clk_enable(ctx->clk); if (ret < 0) return ret; @@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context, offset += ctx->val_bytes; } - if (ctx->clk) + if (!IS_ERR(ctx->clk)) clk_disable(ctx->clk); return 0; @@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context) { struct regmap_mmio_context *ctx = context; - if (ctx->clk) { + if (!IS_ERR(ctx->clk)) { clk_unprepare(ctx->clk); clk_put(ctx->clk); } @@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, ctx->regs = regs; ctx->val_bytes = config->val_bits / 8; + ctx->clk = ERR_PTR(-ENODEV); if (clk_id == NULL) return ctx; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 9c021d9cace0..c2e002100949 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, val + (i * val_bytes), val_bytes); if (ret != 0) - return ret; + goto out; } } else { ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); @@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, /** * regmap_read(): Read a value from a single register * - * @map: Register map to write to + * @map: Register map to read from * @reg: Register to be read from * @val: Pointer to store read value * @@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read); /** * regmap_raw_read(): Read raw data from the device * - * @map: Register map to write to + * @map: Register map to read from * @reg: First register to be read from * @val: Pointer to store read value * @val_len: Size of data to read @@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read); /** * regmap_bulk_read(): Read multiple registers from the device * - * @map: Register map to write to + * @map: Register map to read from * @reg: First register to be read from * @val: Pointer to store read value, in native register size for device * @val_count: Number of registers to read diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index b5d842370cc9..f370fc13aea5 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -223,7 +223,7 @@ static void null_softirq_done_fn(struct request *rq) blk_end_request_all(rq, 0); } -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#ifdef CONFIG_SMP static void null_ipi_cmd_end_io(void *data) { @@ -260,7 +260,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd) put_cpu(); } -#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ +#endif /* CONFIG_SMP */ static inline void null_handle_cmd(struct nullb_cmd *cmd) { @@ -270,7 +270,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) end_cmd(cmd); break; case NULL_IRQ_SOFTIRQ: -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#ifdef CONFIG_SMP null_cmd_end_ipi(cmd); #else end_cmd(cmd); @@ -495,23 +495,23 @@ static int null_add_dev(void) spin_lock_init(&nullb->lock); + if (queue_mode == NULL_Q_MQ && use_per_node_hctx) + submit_queues = nr_online_nodes; + if (setup_queues(nullb)) goto err; if (queue_mode == NULL_Q_MQ) { null_mq_reg.numa_node = home_node; null_mq_reg.queue_depth = hw_queue_depth; + null_mq_reg.nr_hw_queues = submit_queues; if (use_per_node_hctx) { null_mq_reg.ops->alloc_hctx = null_alloc_hctx; null_mq_reg.ops->free_hctx = null_free_hctx; - - null_mq_reg.nr_hw_queues = nr_online_nodes; } else { null_mq_reg.ops->alloc_hctx = 
blk_mq_alloc_single_hw_queue; null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - - null_mq_reg.nr_hw_queues = submit_queues; } nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); @@ -571,7 +571,7 @@ static int __init null_init(void) { unsigned int i; -#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#if !defined(CONFIG_SMP) if (irqmode == NULL_IRQ_SOFTIRQ) { pr_warn("null_blk: softirq completions not available.\n"); pr_warn("null_blk: using direct completions.\n"); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 588479d58f52..6a680d4de7f1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) spin_lock_irqsave(&vblk->vq_lock, flags); if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { + virtqueue_kick(vblk->vq); spin_unlock_irqrestore(&vblk->vq_lock, flags); blk_mq_stop_hw_queue(hctx); - virtqueue_kick(vblk->vq); return BLK_MQ_RQ_QUEUE_BUSY; } - spin_unlock_irqrestore(&vblk->vq_lock, flags); if (last) virtqueue_kick(vblk->vq); + + spin_unlock_irqrestore(&vblk->vq_lock, flags); return BLK_MQ_RQ_QUEUE_OK; } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 432db1b59b00..c4a4c9006288 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -489,7 +489,7 @@ static int blkif_queue_request(struct request *req) if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { - unsigned long pfn; + unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments); @@ -2011,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) bdev = bdget_disk(disk, 0); + if (!bdev) { + WARN(1, "Block device %s yanked out from us!\n", disk->disk_name); + goto out_mutex; + } if (bdev->bd_openers) goto out; @@ -2041,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) out: bdput(bdev); +out_mutex: mutex_unlock(&blkfront_mutex); } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index c206de2951f2..2f2b08457c67 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -165,6 +165,19 @@ config HW_RANDOM_OMAP If unsure, say Y. +config HW_RANDOM_OMAP3_ROM + tristate "OMAP3 ROM Random Number Generator support" + depends on HW_RANDOM && ARCH_OMAP3 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on OMAP34xx processors. + + To compile this driver as a module, choose M here: the + module will be called omap3-rom-rng. + + If unsure, say Y. + config HW_RANDOM_OCTEON tristate "Octeon Random Number Generator support" depends on HW_RANDOM && CAVIUM_OCTEON_SOC @@ -327,3 +340,15 @@ config HW_RANDOM_TPM module will be called tpm-rng. If unsure, say Y. + +config HW_RANDOM_MSM + tristate "Qualcomm MSM Random Number Generator support" + depends on HW_RANDOM && ARCH_MSM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Qualcomm MSM SoCs. + + To compile this driver as a module, choose M here. the + module will be called msm-rng. + + If unsure, say Y. 
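Stepping back to the regmap-mmio hunks above: the context's clock pointer is now initialised to ERR_PTR(-ENODEV) and every user tests !IS_ERR() instead of a NULL check, so an absent (optional) clock is represented by an error sentinel rather than a null pointer. A minimal sketch of that pattern follows; struct example_ctx, example_ctx_alloc and example_read are hypothetical names, not code from the patch.

/* Sketch of the optional-clock sentinel pattern adopted by regmap-mmio. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

struct example_ctx {
	void __iomem *regs;
	struct clk *clk;	/* ERR_PTR(-ENODEV) when no clock was requested */
};

static struct example_ctx *example_ctx_alloc(void __iomem *regs)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->regs = regs;
	/* No clock by default; a real driver would clk_get() one on demand. */
	ctx->clk = ERR_PTR(-ENODEV);
	return ctx;
}

static int example_read(struct example_ctx *ctx, unsigned int reg, u32 *val)
{
	int ret;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	*val = readl(ctx->regs + reg);

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);
	return 0;
}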
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index d7d2435ff7fa..3ae7755a52e7 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o +obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o @@ -28,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o +obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c new file mode 100644 index 000000000000..148521e51dc6 --- /dev/null +++ b/drivers/char/hw_random/msm-rng.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/hw_random.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +/* Device specific register offsets */ +#define PRNG_DATA_OUT 0x0000 +#define PRNG_STATUS 0x0004 +#define PRNG_LFSR_CFG 0x0100 +#define PRNG_CONFIG 0x0104 + +/* Device specific register masks and config values */ +#define PRNG_LFSR_CFG_MASK 0x0000ffff +#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd +#define PRNG_CONFIG_HW_ENABLE BIT(1) +#define PRNG_STATUS_DATA_AVAIL BIT(0) + +#define MAX_HW_FIFO_DEPTH 16 +#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) +#define WORD_SZ 4 + +struct msm_rng { + void __iomem *base; + struct clk *clk; + struct hwrng hwrng; +}; + +#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng) + +static int msm_rng_enable(struct hwrng *hwrng, int enable) +{ + struct msm_rng *rng = to_msm_rng(hwrng); + u32 val; + int ret; + + ret = clk_prepare_enable(rng->clk); + if (ret) + return ret; + + if (enable) { + /* Enable PRNG only if it is not already enabled */ + val = readl_relaxed(rng->base + PRNG_CONFIG); + if (val & PRNG_CONFIG_HW_ENABLE) + goto already_enabled; + + val = readl_relaxed(rng->base + PRNG_LFSR_CFG); + val &= ~PRNG_LFSR_CFG_MASK; + val |= PRNG_LFSR_CFG_CLOCKS; + writel(val, rng->base + PRNG_LFSR_CFG); + + val = readl_relaxed(rng->base + PRNG_CONFIG); + val |= PRNG_CONFIG_HW_ENABLE; + writel(val, rng->base + PRNG_CONFIG); + } else { + val = readl_relaxed(rng->base + PRNG_CONFIG); + val &= ~PRNG_CONFIG_HW_ENABLE; + writel(val, rng->base + PRNG_CONFIG); + } + +already_enabled: + clk_disable_unprepare(rng->clk); + return 0; +} + +static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) +{ + struct msm_rng *rng = to_msm_rng(hwrng); + size_t currsize = 0; + u32 *retdata = data; + size_t maxsize; + int ret; + u32 val; + + /* calculate max size bytes to transfer back to caller */ + maxsize = min_t(size_t, 
MAX_HW_FIFO_SIZE, max); + + /* no room for word data */ + if (maxsize < WORD_SZ) + return 0; + + ret = clk_prepare_enable(rng->clk); + if (ret) + return ret; + + /* read random data from hardware */ + do { + val = readl_relaxed(rng->base + PRNG_STATUS); + if (!(val & PRNG_STATUS_DATA_AVAIL)) + break; + + val = readl_relaxed(rng->base + PRNG_DATA_OUT); + if (!val) + break; + + *retdata++ = val; + currsize += WORD_SZ; + + /* make sure we stay on 32bit boundary */ + if ((maxsize - currsize) < WORD_SZ) + break; + } while (currsize < maxsize); + + clk_disable_unprepare(rng->clk); + + return currsize; +} + +static int msm_rng_init(struct hwrng *hwrng) +{ + return msm_rng_enable(hwrng, 1); +} + +static void msm_rng_cleanup(struct hwrng *hwrng) +{ + msm_rng_enable(hwrng, 0); +} + +static int msm_rng_probe(struct platform_device *pdev) +{ + struct resource *res; + struct msm_rng *rng; + int ret; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng) + return -ENOMEM; + + platform_set_drvdata(pdev, rng); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rng->base)) + return PTR_ERR(rng->base); + + rng->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(rng->clk)) + return PTR_ERR(rng->clk); + + rng->hwrng.name = KBUILD_MODNAME, + rng->hwrng.init = msm_rng_init, + rng->hwrng.cleanup = msm_rng_cleanup, + rng->hwrng.read = msm_rng_read, + + ret = hwrng_register(&rng->hwrng); + if (ret) { + dev_err(&pdev->dev, "failed to register hwrng\n"); + return ret; + } + + return 0; +} + +static int msm_rng_remove(struct platform_device *pdev) +{ + struct msm_rng *rng = platform_get_drvdata(pdev); + + hwrng_unregister(&rng->hwrng); + return 0; +} + +static const struct of_device_id msm_rng_of_match[] = { + { .compatible = "qcom,prng", }, + {} +}; +MODULE_DEVICE_TABLE(of, msm_rng_of_match); + +static struct platform_driver msm_rng_driver = { + .probe = msm_rng_probe, + .remove = msm_rng_remove, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(msm_rng_of_match), + } +}; +module_platform_driver(msm_rng_driver); + +MODULE_ALIAS("platform:" KBUILD_MODNAME); +MODULE_AUTHOR("The Linux Foundation"); +MODULE_DESCRIPTION("Qualcomm MSM random number generator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c new file mode 100644 index 000000000000..c853e9e68573 --- /dev/null +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -0,0 +1,141 @@ +/* + * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family + * + * Copyright (C) 2009 Nokia Corporation + * Author: Juha Yrjola <juha.yrjola@solidboot.com> + * + * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/random.h> +#include <linux/hw_random.h> +#include <linux/timer.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/platform_device.h> + +#define RNG_RESET 0x01 +#define RNG_GEN_PRNG_HW_INIT 0x02 +#define RNG_GEN_HW 0x08 + +/* param1: ptr, param2: count, param3: flag */ +static u32 (*omap3_rom_rng_call)(u32, u32, u32); + +static struct timer_list idle_timer; +static int rng_idle; +static struct clk *rng_clk; + +static void omap3_rom_rng_idle(unsigned long data) +{ + int r; + + r = omap3_rom_rng_call(0, 0, RNG_RESET); + if (r != 0) { + pr_err("reset failed: %d\n", r); + return; + } + clk_disable_unprepare(rng_clk); + rng_idle = 1; +} + +static int omap3_rom_rng_get_random(void *buf, unsigned int count) +{ + u32 r; + u32 ptr; + + del_timer_sync(&idle_timer); + if (rng_idle) { + clk_prepare_enable(rng_clk); + r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); + if (r != 0) { + clk_disable_unprepare(rng_clk); + pr_err("HW init failed: %d\n", r); + return -EIO; + } + rng_idle = 0; + } + + ptr = virt_to_phys(buf); + r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW); + mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500)); + if (r != 0) + return -EINVAL; + return 0; +} + +static int omap3_rom_rng_data_present(struct hwrng *rng, int wait) +{ + return 1; +} + +static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) +{ + int r; + + r = omap3_rom_rng_get_random(data, 4); + if (r < 0) + return r; + return 4; +} + +static struct hwrng omap3_rom_rng_ops = { + .name = "omap3-rom", + .data_present = omap3_rom_rng_data_present, + .data_read = omap3_rom_rng_data_read, +}; + +static int omap3_rom_rng_probe(struct platform_device *pdev) +{ + pr_info("initializing\n"); + + omap3_rom_rng_call = pdev->dev.platform_data; + if (!omap3_rom_rng_call) { + pr_err("omap3_rom_rng_call is NULL\n"); + return -EINVAL; + } + + setup_timer(&idle_timer, omap3_rom_rng_idle, 0); + rng_clk = clk_get(&pdev->dev, "ick"); + if (IS_ERR(rng_clk)) { + pr_err("unable to get RNG clock\n"); + return PTR_ERR(rng_clk); + } + + /* Leave the RNG in reset state. 
*/ + clk_prepare_enable(rng_clk); + omap3_rom_rng_idle(0); + + return hwrng_register(&omap3_rom_rng_ops); +} + +static int omap3_rom_rng_remove(struct platform_device *pdev) +{ + hwrng_unregister(&omap3_rom_rng_ops); + clk_disable_unprepare(rng_clk); + clk_put(rng_clk); + return 0; +} + +static struct platform_driver omap3_rom_rng_driver = { + .driver = { + .name = "omap3-rom-rng", + .owner = THIS_MODULE, + }, + .probe = omap3_rom_rng_probe, + .remove = omap3_rom_rng_remove, +}; + +module_platform_driver(omap3_rom_rng_driver); + +MODULE_ALIAS("platform:omap3-rom-rng"); +MODULE_AUTHOR("Juha Yrjola"); +MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c index b761459a3436..ab7ffdec0ec3 100644 --- a/drivers/char/hw_random/pseries-rng.c +++ b/drivers/char/hw_random/pseries-rng.c @@ -24,7 +24,6 @@ #include <linux/hw_random.h> #include <asm/vio.h> -#define MODULE_NAME "pseries-rng" static int pseries_rng_data_read(struct hwrng *rng, u32 *data) { @@ -55,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) }; static struct hwrng pseries_rng = { - .name = MODULE_NAME, + .name = KBUILD_MODNAME, .data_read = pseries_rng_data_read, }; @@ -78,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = { MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); static struct vio_driver pseries_rng_driver = { - .name = MODULE_NAME, + .name = KBUILD_MODNAME, .probe = pseries_rng_probe, .remove = pseries_rng_remove, .get_desired_dma = pseries_rng_get_desired_dma, diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index e737772ad69a..de5a6dcfb3e2 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -221,7 +221,7 @@ static void __exit mod_exit(void) module_init(mod_init); module_exit(mod_exit); -static struct x86_cpu_id via_rng_cpu_id[] = { +static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_XSTORE), {} }; diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index 40cc0cf2ded6..e6939e13e338 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"), }, }, + { + .ident = "Dell XPS421", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"), + }, + }, { } }; diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 94c0c74434ea..1a65838888cd 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -33,6 +33,15 @@ config TCG_TIS from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_tis. +config TCG_TIS_I2C_ATMEL + tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" + depends on I2C + ---help--- + If you have an Atmel I2C TPM security chip say Yes and it will be + accessible from within Linux. + To compile this driver as a module, choose M here; the module will + be called tpm_tis_i2c_atmel. + config TCG_TIS_I2C_INFINEON tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)" depends on I2C @@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON Specification 0.20 say Yes and it will be accessible from within Linux. To compile this driver as a module, choose M here; the module - will be called tpm_tis_i2c_infineon. + will be called tpm_i2c_infineon. 
+ +config TCG_TIS_I2C_NUVOTON + tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)" + depends on I2C + ---help--- + If you have a TPM security chip with an I2C interface from + Nuvoton Technology Corp. say Yes and it will be accessible + from within Linux. + To compile this driver as a module, choose M here; the module + will be called tpm_i2c_nuvoton. config TCG_NSC tristate "National Semiconductor TPM Interface" @@ -82,14 +101,14 @@ config TCG_IBMVTPM as a module, choose M here; the module will be called tpm_ibmvtpm. config TCG_ST33_I2C - tristate "STMicroelectronics ST33 I2C TPM" - depends on I2C - depends on GPIOLIB - ---help--- - If you have a TPM security chip from STMicroelectronics working with - an I2C bus say Yes and it will be accessible from within Linux. - To compile this driver as a module, choose M here; the module will be - called tpm_stm_st33_i2c. + tristate "STMicroelectronics ST33 I2C TPM" + depends on I2C + depends on GPIOLIB + ---help--- + If you have a TPM security chip from STMicroelectronics working with + an I2C bus say Yes and it will be accessible from within Linux. + To compile this driver as a module, choose M here; the module will be + called tpm_stm_st33_i2c. config TCG_XEN tristate "XEN TPM Interface" diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index eb41ff97d0ad..b80a4000daee 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -2,17 +2,20 @@ # Makefile for the kernel tpm device drivers. # obj-$(CONFIG_TCG_TPM) += tpm.o +tpm-y := tpm-interface.o +tpm-$(CONFIG_ACPI) += tpm_ppi.o + ifdef CONFIG_ACPI - obj-$(CONFIG_TCG_TPM) += tpm_bios.o - tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o + tpm-y += tpm_eventlog.o tpm_acpi.o else ifdef CONFIG_TCG_IBMVTPM - obj-$(CONFIG_TCG_TPM) += tpm_bios.o - tpm_bios-objs += tpm_eventlog.o tpm_of.o + tpm-y += tpm_eventlog.o tpm_of.o endif endif obj-$(CONFIG_TCG_TIS) += tpm_tis.o +obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o +obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o obj-$(CONFIG_TCG_NSC) += tpm_nsc.o obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c index e3c974a6c522..6ae41d337630 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm-interface.c @@ -10,13 +10,13 @@ * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). - * Specifications at www.trustedcomputinggroup.org + * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. - * + * * Note, the TPM chip is not interrupt driven (only polling) * and can have very long timeouts (minutes!). Hence the unusual * calls to msleep. 
@@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, return -ENODATA; if (count > bufsiz) { dev_err(chip->dev, - "invalid count value %x %zx \n", count, bufsiz); + "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } mutex_lock(&chip->tpm_mutex); - if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) { + rc = chip->vendor.send(chip, (u8 *) buf, count); + if (rc < 0) { dev_err(chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; @@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd, { int err; - len = tpm_transmit(chip,(u8 *) cmd, len); + len = tpm_transmit(chip, (u8 *) cmd, len); if (len < 0) return len; else if (len < TPM_HEADER_SIZE) @@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip) return rc; } -ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_enabled); -ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_active); -ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_owned); -ssize_t tpm_show_temp_deactivated(struct device * dev, - struct device_attribute * attr, char *buf) +ssize_t tpm_show_temp_deactivated(struct device *dev, + struct device_attribute *attr, char *buf) { cap_t cap; ssize_t rc; @@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) /** * tpm_pcr_read - read a pcr value - * @chip_num: tpm idx # or ANY + * @chip_num: tpm idx # or ANY * @pcr_idx: pcr idx to retrieve - * @res_buf: TPM_PCR value - * size of res_buf is 20 bytes (or NULL if you don't care) + * @res_buf: TPM_PCR value + * size of res_buf is 20 bytes (or NULL if you don't care) * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing @@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read); /** * tpm_pcr_extend - extend pcr value with hash - * @chip_num: tpm idx # or AN& + * @chip_num: tpm idx # or AN& * @pcr_idx: pcr idx to extend - * @hash: hash value used to extend pcr value + * @hash: hash value used to extend pcr value * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing @@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip) unsigned long duration; struct tpm_cmd_t cmd; - duration = tpm_calc_ordinal_duration(chip, - TPM_ORD_CONTINUE_SELFTEST); + duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); loops = jiffies_to_msecs(duration) / delay_msec; @@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, if (err) goto out; - /* + /* ignore header 10 bytes algorithm 32 bits (1 == RSA ) encscheme 16 bits sigscheme 16 bits - parameters (RSA 12->bytes: keybit, #primes, expbit) + parameters (RSA 
12->bytes: keybit, #primes, expbit) keylenbytes 32 bits 256 byte modulus ignore checksum 20 bytes @@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); - rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, - "attempting to determine the 1.1 version"); - if (rc) - return 0; - str += sprintf(str, - "TCG version: %d.%d\nFirmware version: %d.%d\n", - cap.tpm_version.Major, cap.tpm_version.Minor, - cap.tpm_version.revMajor, cap.tpm_version.revMinor); - return str - buf; -} -EXPORT_SYMBOL_GPL(tpm_show_caps); - -ssize_t tpm_show_caps_1_2(struct device * dev, - struct device_attribute * attr, char *buf) -{ - cap_t cap; - ssize_t rc; - char *str = buf; - - rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, - "attempting to determine the manufacturer"); - if (rc) - return 0; - str += sprintf(str, "Manufacturer: 0x%x\n", - be32_to_cpu(cap.manufacturer_id)); + /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version"); - if (rc) - return 0; - str += sprintf(str, - "TCG version: %d.%d\nFirmware version: %d.%d\n", - cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor, - cap.tpm_version_1_2.revMajor, - cap.tpm_version_1_2.revMinor); + if (!rc) { + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version_1_2.Major, + cap.tpm_version_1_2.Minor, + cap.tpm_version_1_2.revMajor, + cap.tpm_version_1_2.revMinor); + } else { + /* Otherwise just use TPM_STRUCT_VER */ + rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version"); + if (rc) + return 0; + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version.Major, + cap.tpm_version.Minor, + cap.tpm_version.revMajor, + cap.tpm_version.revMinor); + } + return str - buf; } -EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); +EXPORT_SYMBOL_GPL(tpm_show_caps); ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr, char *buf) @@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, } EXPORT_SYMBOL_GPL(tpm_store_cancel); -static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, - bool *canceled) +static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, + bool check_cancel, bool *canceled) { u8 status = chip->vendor.status(chip); @@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat); */ int tpm_open(struct inode *inode, struct file *file) { - int minor = iminor(inode); - struct tpm_chip *chip = NULL, *pos; - - rcu_read_lock(); - list_for_each_entry_rcu(pos, &tpm_chip_list, list) { - if (pos->vendor.miscdev.minor == minor) { - chip = pos; - get_device(chip->dev); - break; - } - } - rcu_read_unlock(); - - if (!chip) - return -ENODEV; + struct miscdevice *misc = file->private_data; + struct tpm_chip *chip = container_of(misc, struct tpm_chip, + vendor.miscdev); if (test_and_set_bit(0, &chip->is_open)) { dev_dbg(chip->dev, "Another process owns this TPM\n"); - put_device(chip->dev); return -EBUSY; } chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); - put_device(chip->dev); return -ENOMEM; } atomic_set(&chip->data_pending, 0); file->private_data = chip; + get_device(chip->dev); return 0; } EXPORT_SYMBOL_GPL(tpm_open); @@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip) chip->vendor.release(chip->dev); 
clear_bit(chip->dev_num, dev_mask); - kfree(chip->vendor.miscdev.name); } EXPORT_SYMBOL_GPL(tpm_dev_vendor_release); @@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev) EXPORT_SYMBOL_GPL(tpm_dev_release); /* - * Called from tpm_<specific>.c probe function only for devices + * Called from tpm_<specific>.c probe function only for devices * the driver has determined it should claim. Prior to calling * this function the specific probe function has called pci_enable_device * upon errant exit from this function specific probe function should call @@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release); struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vendor_specific *entry) { -#define DEVNAME_SIZE 7 - - char *devname; struct tpm_chip *chip; /* Driver specific per-device data */ chip = kzalloc(sizeof(*chip), GFP_KERNEL); - devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL); - if (chip == NULL || devname == NULL) - goto out_free; + if (chip == NULL) + return NULL; mutex_init(&chip->buffer_mutex); mutex_init(&chip->tpm_mutex); @@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, set_bit(chip->dev_num, dev_mask); - scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num); - chip->vendor.miscdev.name = devname; + scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm", + chip->dev_num); + chip->vendor.miscdev.name = chip->devname; chip->vendor.miscdev.parent = dev; chip->dev = get_device(dev); @@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, goto put_device; } - chip->bios_dir = tpm_bios_log_setup(devname); + chip->bios_dir = tpm_bios_log_setup(chip->devname); /* Make chip available */ spin_lock(&driver_lock); @@ -1571,7 +1544,6 @@ put_device: put_device(chip->dev); out_free: kfree(chip); - kfree(devname); return NULL; } EXPORT_SYMBOL_GPL(tpm_register_hardware); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index a7bfc176ed43..f32847872193 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr, char *); extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr, char *); -extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr, - char *); extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr, const char *, size_t); extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr, @@ -122,6 +120,7 @@ struct tpm_chip { struct device *dev; /* Device stuff */ int dev_num; /* /dev/tpm# */ + char devname[7]; unsigned long is_open; /* only one allowed */ int time_expired; diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 99d6820c611d..c9a528d25d22 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -202,7 +202,7 @@ static int __init init_atmel(void) have_region = (atmel_request_region - (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1; + (base, region_size, "tpm_atmel0") == NULL) ? 
0 : 1; pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0); if (IS_ERR(pdev)) { diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c index 84ddc557b8f8..59f7cb28260b 100644 --- a/drivers/char/tpm/tpm_eventlog.c +++ b/drivers/char/tpm/tpm_eventlog.c @@ -406,7 +406,6 @@ out_tpm: out: return NULL; } -EXPORT_SYMBOL_GPL(tpm_bios_log_setup); void tpm_bios_log_teardown(struct dentry **lst) { @@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst) for (i = 0; i < 3; i++) securityfs_remove(lst[i]); } -EXPORT_SYMBOL_GPL(tpm_bios_log_teardown); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c new file mode 100644 index 000000000000..c3cd7fe481a1 --- /dev/null +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -0,0 +1,284 @@ +/* + * ATMEL I2C TPM AT97SC3204T + * + * Copyright (C) 2012 V Lab Technologies + * Teddy Reed <teddy@prosauce.org> + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe <jgunthorpe@obsidianresearch.com> + * Device driver for ATMEL I2C TPMs. + * + * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM + * devices the raw TCG formatted TPM command data is written via I2C and then + * raw TCG formatted TPM command data is returned via I2C. + * + * TGC status/locality/etc functions seen in the LPC implementation do not + * seem to be present. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include "tpm.h" + +#define I2C_DRIVER_NAME "tpm_i2c_atmel" + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +#define ATMEL_STS_OK 1 + +struct priv_data { + size_t len; + /* This is the amount we read on the first try. 25 was chosen to fit a + * fair number of read responses in the buffer so a 2nd retry can be + * avoided in small message cases. 
*/ + u8 buffer[sizeof(struct tpm_output_header) + 25]; +}; + +static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + + priv->len = 0; + + if (len <= 2) + return -EIO; + + status = i2c_master_send(client, buf, len); + + dev_dbg(chip->dev, + "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, + (int)min_t(size_t, 64, len), buf, len, status); + return status; +} + +static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + struct tpm_output_header *hdr = + (struct tpm_output_header *)priv->buffer; + u32 expected_len; + int rc; + + if (priv->len == 0) + return -EIO; + + /* Get the message size from the message header, if we didn't get the + * whole message in read_status then we need to re-read the + * message. */ + expected_len = be32_to_cpu(hdr->length); + if (expected_len > count) + return -ENOMEM; + + if (priv->len >= expected_len) { + dev_dbg(chip->dev, + "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + memcpy(buf, priv->buffer, expected_len); + return expected_len; + } + + rc = i2c_master_recv(client, buf, expected_len); + dev_dbg(chip->dev, + "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + return rc; +} + +static void i2c_atmel_cancel(struct tpm_chip *chip) +{ + dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported"); +} + +static u8 i2c_atmel_read_status(struct tpm_chip *chip) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + int rc; + + /* The TPM fails the I2C read until it is ready, so we do the entire + * transfer here and buffer it locally. This way the common code can + * properly handle the timeouts. */ + priv->len = 0; + memset(priv->buffer, 0, sizeof(priv->buffer)); + + + /* Once the TPM has completed the command the command remains readable + * until another command is issued. 
*/ + rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); + dev_dbg(chip->dev, + "%s: sts=%d", __func__, rc); + if (rc <= 0) + return 0; + + priv->len = rc; + + return ATMEL_STS_OK; +} + +static const struct file_operations i2c_atmel_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *i2c_atmel_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group i2c_atmel_attr_grp = { + .attrs = i2c_atmel_attrs +}; + +static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) +{ + return 0; +} + +static const struct tpm_vendor_specific i2c_atmel = { + .status = i2c_atmel_read_status, + .recv = i2c_atmel_recv, + .send = i2c_atmel_send, + .cancel = i2c_atmel_cancel, + .req_complete_mask = ATMEL_STS_OK, + .req_complete_val = ATMEL_STS_OK, + .req_canceled = i2c_atmel_req_canceled, + .attr_group = &i2c_atmel_attr_grp, + .miscdev.fops = &i2c_atmel_ops, +}; + +static int i2c_atmel_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + chip = tpm_register_hardware(dev, &i2c_atmel); + if (!chip) { + dev_err(dev, "%s() error in tpm_register_hardware\n", __func__); + return -ENODEV; + } + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.irq = 0; + + /* There is no known way to probe for this device, and all version + * information seems to be read via TPM commands. Thus we rely on the + * TPM startup process in the common code to detect the device. 
*/ + if (tpm_get_timeouts(chip)) { + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + rc = -ENODEV; + goto out_err; + } + + return 0; + +out_err: + tpm_dev_vendor_release(chip); + tpm_remove_hardware(chip->dev); + return rc; +} + +static int i2c_atmel_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip) + tpm_dev_vendor_release(chip); + tpm_remove_hardware(dev); + kfree(chip); + return 0; +} + +static const struct i2c_device_id i2c_atmel_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_atmel_of_match[] = { + {.compatible = "atmel,at97sc3204t"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_atmel_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_atmel_driver = { + .id_table = i2c_atmel_id, + .probe = i2c_atmel_probe, + .remove = i2c_atmel_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_atmel_pm_ops, + .of_match_table = of_match_ptr(i2c_atmel_of_match), + }, +}; + +module_i2c_driver(i2c_atmel_driver); + +MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>"); +MODULE_DESCRIPTION("Atmel TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index b8735de8ce95..fefd2aa5c81e 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); @@ -685,7 +685,6 @@ out_vendor: chip->dev->release = NULL; chip->release = NULL; tpm_dev.client = NULL; - dev_set_drvdata(chip->dev, chip); out_err: return rc; } @@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client) chip->dev->release = NULL; chip->release = NULL; tpm_dev.client = NULL; - dev_set_drvdata(chip->dev, chip); return 0; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c new file mode 100644 index 000000000000..6276fea01ff0 --- /dev/null +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -0,0 +1,710 @@ +/****************************************************************************** + * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501, + * based on the TCG TPM Interface Spec version 1.2. + * Specifications at www.trustedcomputinggroup.org + * + * Copyright (C) 2011, Nuvoton Technology Corporation. + * Dan Morav <dan.morav@nuvoton.com> + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe <jgunthorpe@obsidianresearch.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + * + * Nuvoton contact information: APC.Support@nuvoton.com + *****************************************************************************/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/i2c.h> +#include "tpm.h" + +/* I2C interface offsets */ +#define TPM_STS 0x00 +#define TPM_BURST_COUNT 0x01 +#define TPM_DATA_FIFO_W 0x20 +#define TPM_DATA_FIFO_R 0x40 +#define TPM_VID_DID_RID 0x60 +/* TPM command header size */ +#define TPM_HEADER_SIZE 10 +#define TPM_RETRY 5 +/* + * I2C bus device maximum buffer size w/o counting I2C address or command + * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data + */ +#define TPM_I2C_MAX_BUF_SIZE 32 +#define TPM_I2C_RETRY_COUNT 32 +#define TPM_I2C_BUS_DELAY 1 /* msec */ +#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ +#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ + +#define I2C_DRIVER_NAME "tpm_i2c_nuvoton" + +struct priv_data { + unsigned int intrs; +}; + +static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_read_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_write_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +#define TPM_STS_VALID 0x80 +#define TPM_STS_COMMAND_READY 0x40 +#define TPM_STS_GO 0x20 +#define TPM_STS_DATA_AVAIL 0x10 +#define TPM_STS_EXPECT 0x08 +#define TPM_STS_RESPONSE_RETRY 0x02 +#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */ + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +/* read TPM_STS register */ +static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + u8 data; + + status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); + if (status <= 0) { + dev_err(chip->dev, "%s() error return %d\n", __func__, + status); + data = TPM_STS_ERR_VAL; + } + + return data; +} + +/* write byte to TPM_STS register */ +static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) +{ + s32 status; + int i; + + /* this causes the current command to be aborted */ + for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) { + status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data); + msleep(TPM_I2C_BUS_DELAY); + } + return status; +} + +/* write commandReady to TPM_STS register */ +static void i2c_nuvoton_ready(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + + /* this causes the current command to be aborted */ + status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); + if (status < 0) + 
dev_err(chip->dev, + "%s() fail to write TPM_STS.commandReady\n", __func__); +} + +/* read burstCount field from TPM_STS register + * return -1 on fail to read */ +static int i2c_nuvoton_get_burstcount(struct i2c_client *client, + struct tpm_chip *chip) +{ + unsigned long stop = jiffies + chip->vendor.timeout_d; + s32 status; + int burst_count = -1; + u8 data; + + /* wait for burstcount to be non-zero */ + do { + /* in I2C burstCount is 1 byte */ + status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1, + &data); + if (status > 0 && data > 0) { + burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data); + break; + } + msleep(TPM_I2C_BUS_DELAY); + } while (time_before(jiffies, stop)); + + return burst_count; +} + +/* + * WPCT301/NPCT501 SINT# supports only dataAvail + * any call to this function which is not waiting for dataAvail will + * set queue to NULL to avoid waiting for interrupt + */ +static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) +{ + u8 status = i2c_nuvoton_read_status(chip); + return (status != TPM_STS_ERR_VAL) && ((status & mask) == value); +} + +static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, + u32 timeout, wait_queue_head_t *queue) +{ + if (chip->vendor.irq && queue) { + s32 rc; + DEFINE_WAIT(wait); + struct priv_data *priv = chip->vendor.priv; + unsigned int cur_intrs = priv->intrs; + + enable_irq(chip->vendor.irq); + rc = wait_event_interruptible_timeout(*queue, + cur_intrs != priv->intrs, + timeout); + if (rc > 0) + return 0; + /* At this point we know that the SINT pin is asserted, so we + * do not need to do i2c_nuvoton_check_status */ + } else { + unsigned long ten_msec, stop; + bool status_valid; + + /* check current status */ + status_valid = i2c_nuvoton_check_status(chip, mask, value); + if (status_valid) + return 0; + + /* use polling to wait for the event */ + ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); + stop = jiffies + timeout; + do { + if (time_before(jiffies, ten_msec)) + msleep(TPM_I2C_RETRY_DELAY_SHORT); + else + msleep(TPM_I2C_RETRY_DELAY_LONG); + status_valid = i2c_nuvoton_check_status(chip, mask, + value); + if (status_valid) + return 0; + } while (time_before(jiffies, stop)); + } + dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + value); + return -ETIMEDOUT; +} + +/* wait for dataAvail field to be set in the TPM_STS register */ +static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, + wait_queue_head_t *queue) +{ + return i2c_nuvoton_wait_for_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + timeout, queue); +} + +/* Read @count bytes into @buf from TPM_RD_FIFO register */ +static int i2c_nuvoton_recv_data(struct i2c_client *client, + struct tpm_chip *chip, u8 *buf, size_t count) +{ + s32 rc; + int burst_count, bytes2read, size = 0; + + while (size < count && + i2c_nuvoton_wait_for_data_avail(chip, + chip->vendor.timeout_c, + &chip->vendor.read_queue) == 0) { + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(chip->dev, + "%s() fail to read burstCount=%d\n", __func__, + burst_count); + return -EIO; + } + bytes2read = min_t(size_t, burst_count, count - size); + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, + bytes2read, &buf[size]); + if (rc < 0) { + dev_err(chip->dev, + "%s() fail on i2c_nuvoton_read_buf()=%d\n", + __func__, rc); + return -EIO; + } + dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read); + size += bytes2read; + } + + return size; +} + 
+/* Read TPM command results */ +static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct device *dev = chip->dev; + struct i2c_client *client = to_i2c_client(dev); + s32 rc; + int expected, status, burst_count, retries, size = 0; + + if (count < TPM_HEADER_SIZE) { + i2c_nuvoton_ready(chip); /* return to idle */ + dev_err(dev, "%s() count < header size\n", __func__); + return -EIO; + } + for (retries = 0; retries < TPM_RETRY; retries++) { + if (retries > 0) { + /* if this is not the first trial, set responseRetry */ + i2c_nuvoton_write_status(client, + TPM_STS_RESPONSE_RETRY); + } + /* + * read first available (> 10 bytes), including: + * tag, paramsize, and result + */ + status = i2c_nuvoton_wait_for_data_avail( + chip, chip->vendor.timeout_c, &chip->vendor.read_queue); + if (status != 0) { + dev_err(dev, "%s() timeout on dataAvail\n", __func__); + size = -ETIMEDOUT; + continue; + } + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail to get burstCount\n", __func__); + size = -EIO; + continue; + } + size = i2c_nuvoton_recv_data(client, chip, buf, + burst_count); + if (size < TPM_HEADER_SIZE) { + dev_err(dev, "%s() fail to read header\n", __func__); + size = -EIO; + continue; + } + /* + * convert number of expected bytes field from big endian 32 bit + * to machine native + */ + expected = be32_to_cpu(*(__be32 *) (buf + 2)); + if (expected > count) { + dev_err(dev, "%s() expected > count\n", __func__); + size = -EIO; + continue; + } + rc = i2c_nuvoton_recv_data(client, chip, &buf[size], + expected - size); + size += rc; + if (rc < 0 || size < expected) { + dev_err(dev, "%s() fail to read remainder of result\n", + __func__); + size = -EIO; + continue; + } + if (i2c_nuvoton_wait_for_stat( + chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, + TPM_STS_VALID, chip->vendor.timeout_c, + NULL)) { + dev_err(dev, "%s() error left over data\n", __func__); + size = -ETIMEDOUT; + continue; + } + break; + } + i2c_nuvoton_ready(chip); + dev_dbg(chip->dev, "%s() -> %d\n", __func__, size); + return size; +} + +/* + * Send TPM command. 
+ * + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct device *dev = chip->dev; + struct i2c_client *client = to_i2c_client(dev); + u32 ordinal; + size_t count = 0; + int burst_count, bytes2write, retries, rc = -EIO; + + for (retries = 0; retries < TPM_RETRY; retries++) { + i2c_nuvoton_ready(chip); + if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, NULL)) { + dev_err(dev, "%s() timeout on commandReady\n", + __func__); + rc = -EIO; + continue; + } + rc = 0; + while (count < len - 1) { + burst_count = i2c_nuvoton_get_burstcount(client, + chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail get burstCount\n", + __func__); + rc = -EIO; + break; + } + bytes2write = min_t(size_t, burst_count, + len - 1 - count); + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, + bytes2write, &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail i2cWriteBuf\n", + __func__); + break; + } + dev_dbg(dev, "%s(%d):", __func__, bytes2write); + count += bytes2write; + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | + TPM_STS_EXPECT, + TPM_STS_VALID | + TPM_STS_EXPECT, + chip->vendor.timeout_c, + NULL); + if (rc < 0) { + dev_err(dev, "%s() timeout on Expect\n", + __func__); + rc = -ETIMEDOUT; + break; + } + } + if (rc < 0) + continue; + + /* write last byte */ + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1, + &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail to write last byte\n", + __func__); + rc = -EIO; + continue; + } + dev_dbg(dev, "%s(last): %02x", __func__, buf[count]); + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | TPM_STS_EXPECT, + TPM_STS_VALID, + chip->vendor.timeout_c, NULL); + if (rc) { + dev_err(dev, "%s() timeout on Expect to clear\n", + __func__); + rc = -ETIMEDOUT; + continue; + } + break; + } + if (rc < 0) { + /* retries == TPM_RETRY */ + i2c_nuvoton_ready(chip); + return rc; + } + /* execute the TPM command */ + rc = i2c_nuvoton_write_status(client, TPM_STS_GO); + if (rc < 0) { + dev_err(dev, "%s() fail to write Go\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + rc = i2c_nuvoton_wait_for_data_avail(chip, + tpm_calc_ordinal_duration(chip, + ordinal), + &chip->vendor.read_queue); + if (rc) { + dev_err(dev, "%s() timeout command duration\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + + dev_dbg(dev, "%s() -> %zd\n", __func__, len); + return len; +} + +static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == TPM_STS_COMMAND_READY); +} + +static const struct file_operations i2c_nuvoton_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, 
tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *i2c_nuvoton_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group i2c_nuvoton_attr_grp = { + .attrs = i2c_nuvoton_attrs +}; + +static const struct tpm_vendor_specific tpm_i2c = { + .status = i2c_nuvoton_read_status, + .recv = i2c_nuvoton_recv, + .send = i2c_nuvoton_send, + .cancel = i2c_nuvoton_ready, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = i2c_nuvoton_req_canceled, + .attr_group = &i2c_nuvoton_attr_grp, + .miscdev.fops = &i2c_nuvoton_ops, +}; + +/* The only purpose for the handler is to signal to any waiting threads that + * the interrupt is currently being asserted. The driver does not do any + * processing triggered by interrupts, and the chip provides no way to mask at + * the source (plus that would be slow over I2C). Run the IRQ as a one-shot, + * this means it cannot be shared. */ +static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + struct priv_data *priv = chip->vendor.priv; + + priv->intrs++; + wake_up(&chip->vendor.read_queue); + disable_irq_nosync(chip->vendor.irq); + return IRQ_HANDLED; +} + +static int get_vid(struct i2c_client *client, u32 *res) +{ + static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe }; + u32 temp; + s32 rc; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) { + /* + * f/w rev 2.81 has an issue where the VID_DID_RID is not + * reporting the right value. so give it another chance at + * offset 0x20 (FIFO_W). 
+ */ + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4, + (u8 *) (&temp)); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, + sizeof(vid_did_rid_value))) + return -ENODEV; + } + + *res = temp; + return 0; +} + +static int i2c_nuvoton_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + u32 vid = 0; + + rc = get_vid(client, &vid); + if (rc) + return rc; + + dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid, + (u8) (vid >> 16), (u8) (vid >> 24)); + + chip = tpm_register_hardware(dev, &tpm_i2c); + if (!chip) { + dev_err(dev, "%s() error in tpm_register_hardware\n", __func__); + return -ENODEV; + } + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + + /* + * I2C intfcaps (interrupt capabilitieis) in the chip are hard coded to: + * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT + * The IRQ should be set in the i2c_board_info (which is done + * automatically in of_i2c_register_devices, for device tree users */ + chip->vendor.irq = client->irq; + + if (chip->vendor.irq) { + dev_dbg(dev, "%s() chip-vendor.irq\n", __func__); + rc = devm_request_irq(dev, chip->vendor.irq, + i2c_nuvoton_int_handler, + IRQF_TRIGGER_LOW, + chip->vendor.miscdev.name, + chip); + if (rc) { + dev_err(dev, "%s() Unable to request irq: %d for use\n", + __func__, chip->vendor.irq); + chip->vendor.irq = 0; + } else { + /* Clear any pending interrupt */ + i2c_nuvoton_ready(chip); + /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, + NULL); + if (rc == 0) { + /* + * TIS is in ready state + * write dummy byte to enter reception state + * TPM_DATA_FIFO_W <- rc (0) + */ + rc = i2c_nuvoton_write_buf(client, + TPM_DATA_FIFO_W, + 1, (u8 *) (&rc)); + if (rc < 0) + goto out_err; + /* TPM_STS <- 0x40 (commandReady) */ + i2c_nuvoton_ready(chip); + } else { + /* + * timeout_b reached - command was + * aborted. 
TIS should now be in idle state - + * only TPM_STS_VALID should be set + */ + if (i2c_nuvoton_read_status(chip) != + TPM_STS_VALID) { + rc = -EIO; + goto out_err; + } + } + } + } + + if (tpm_get_timeouts(chip)) { + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + rc = -ENODEV; + goto out_err; + } + + return 0; + +out_err: + tpm_dev_vendor_release(chip); + tpm_remove_hardware(chip->dev); + return rc; +} + +static int i2c_nuvoton_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip) + tpm_dev_vendor_release(chip); + tpm_remove_hardware(dev); + kfree(chip); + return 0; +} + + +static const struct i2c_device_id i2c_nuvoton_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_nuvoton_of_match[] = { + {.compatible = "nuvoton,npct501"}, + {.compatible = "winbond,wpct301"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_nuvoton_driver = { + .id_table = i2c_nuvoton_id, + .probe = i2c_nuvoton_probe, + .remove = i2c_nuvoton_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_nuvoton_pm_ops, + .of_match_table = of_match_ptr(i2c_nuvoton_of_match), + }, +}; + +module_i2c_driver(i2c_nuvoton_driver); + +MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)"); +MODULE_DESCRIPTION("Nuvoton TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c index 5bb8e2ddd3b3..a0d6ceb5d005 100644 --- a/drivers/char/tpm/tpm_i2c_stm_st33.c +++ b/drivers/char/tpm/tpm_i2c_stm_st33.c @@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static struct attribute *stm_tpm_attrs[] = { @@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) tpm_get_timeouts(chip); - i2c_set_clientdata(client, chip); - dev_info(chip->dev, "TPM I2C Initialized\n"); return 0; _irq_set: @@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client) #ifdef CONFIG_PM_SLEEP /* * tpm_st33_i2c_pm_suspend suspend the TPM device - * Added: Work around when suspend and no tpm application is running, suspend - * may fail because chip->data_buffer is not set (only set in tpm_open in Linux - * TPM core) * @param: client, the i2c_client drescription (TPM I2C description). * @param: mesg, the power management message. * @return: 0 in case of success. 
*/ static int tpm_st33_i2c_pm_suspend(struct device *dev) { - struct tpm_chip *chip = dev_get_drvdata(dev); struct st33zp24_platform_data *pin_infos = dev->platform_data; int ret = 0; if (power_mgt) { gpio_set_value(pin_infos->io_lpcpd, 0); } else { - if (chip->data_buffer == NULL) - chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_suspend(dev); } return ret; @@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev) TPM_STS_VALID) == TPM_STS_VALID, chip->vendor.timeout_b); } else { - if (chip->data_buffer == NULL) - chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_resume(dev); if (!ret) tpm_do_selftest(chip); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 56b07c35a13e..2783a42aa732 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) if (count < len) { dev_err(ibmvtpm->dev, - "Invalid size in recv: count=%ld, crq_size=%d\n", + "Invalid size in recv: count=%zd, crq_size=%d\n", count, len); return -EIO; } @@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) if (count > ibmvtpm->rtce_size) { dev_err(ibmvtpm->dev, - "Invalid size in send: count=%ld, rtce_size=%d\n", + "Invalid size in send: count=%zd, rtce_size=%d\n", count, ibmvtpm->rtce_size); return -EIO; } @@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 2168d15bc728..8e562dc65601 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent) { return sysfs_create_group(parent, &ppi_attr_grp); } -EXPORT_SYMBOL_GPL(tpm_add_ppi); void tpm_remove_ppi(struct kobject *parent) { sysfs_remove_group(parent, &ppi_attr_grp); } -EXPORT_SYMBOL_GPL(tpm_remove_ppi); - -MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 5796d0157ce0..1b74459c0723 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 94c280d36e8b..c8ff4df81779 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev, tpm_get_timeouts(priv->chip); - dev_set_drvdata(&dev->dev, priv->chip); - return rv; } diff --git a/drivers/clocksource/Kconfig 
b/drivers/clocksource/Kconfig index bdb953e15d2a..5c07a56962db 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -87,6 +87,7 @@ config ARM_ARCH_TIMER config ARM_ARCH_TIMER_EVTSTREAM bool "Support for ARM architected timer event stream generation" default y if ARM_ARCH_TIMER + depends on ARM_ARCH_TIMER help This option enables support for event stream generation based on the ARM architected timer. It is used for waking up CPUs executing diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 4aac9ee0d0c0..3cf12834681e 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) goto err1; } - return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), - cfg->clockevent_rating); + ret = clk_prepare(p->clk); + if (ret < 0) + goto err2; + + ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), + cfg->clockevent_rating); + if (ret < 0) + goto err3; + + return 0; + err3: + clk_unprepare(p->clk); + err2: + clk_put(p->clk); err1: iounmap(p->mapbase); err0: diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 78b8dae49628..63557cda0a7d 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) ret = PTR_ERR(p->clk); goto err1; } + + ret = clk_prepare(p->clk); + if (ret < 0) + goto err2; + p->cs_enabled = false; p->enable_count = 0; - return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), - cfg->clockevent_rating, - cfg->clocksource_rating); + ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), + cfg->clockevent_rating, + cfg->clocksource_rating); + if (ret < 0) + goto err3; + + return 0; + + err3: + clk_unprepare(p->clk); + err2: + clk_put(p->clk); err1: iounmap(p->mapbase); err0: diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index c73fc2b74de2..18c5b9b16645 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -32,11 +32,23 @@ #include <linux/atomic.h> #include <linux/pid_namespace.h> -#include <asm/unaligned.h> - #include <linux/cn_proc.h> -#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) +/* + * Size of a cn_msg followed by a proc_event structure. Since the + * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we + * add one 4-byte word to the size here, and then start the actual + * cn_msg structure 4 bytes into the stack buffer. The result is that + * the immediately following proc_event structure is aligned to 8 bytes. + */ +#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4) + +/* See comment above; we test our assumption about sizeof struct cn_msg here. 
*/ +static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer) +{ + BUILD_BUG_ON(sizeof(struct cn_msg) != 20); + return (struct cn_msg *)(buffer + 4); +} static atomic_t proc_event_num_listeners = ATOMIC_INIT(0); static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; @@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; struct task_struct *parent; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_FORK; rcu_read_lock(); parent = rcu_dereference(task->real_parent); @@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_EXEC; ev->event_data.exec.process_pid = task->pid; ev->event_data.exec.process_tgid = task->tgid; @@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; const struct cred *cred; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); ev->what = which_id; @@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id) rcu_read_unlock(); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ @@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_SID; ev->event_data.sid.process_pid = task->pid; ev->event_data.sid.process_tgid = task->tgid; @@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 
buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_PTRACE; ev->event_data.ptrace.process_pid = task->pid; ev->event_data.ptrace.process_tgid = task->tgid; @@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task) struct cn_msg *msg; struct proc_event *ev; struct timespec ts; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_COMM; ev->event_data.comm.process_pid = task->pid; ev->event_data.comm.process_tgid = task->tgid; @@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; ev->event_data.coredump.process_tgid = task->tgid; @@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->what = PROC_EVENT_EXIT; ev->event_data.exit.process_pid = task->pid; ev->event_data.exit.process_tgid = task->tgid; @@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) { struct cn_msg *msg; struct proc_event *ev; - __u8 buffer[CN_PROC_MSG_SIZE]; + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; - msg = (struct cn_msg *)buffer; + msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); msg->seq = rcvd_seq; ktime_get_ts(&ts); /* get high res monotonic timestamp */ - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); + ev->timestamp_ns = timespec_to_ns(&ts); ev->cpu = -1; ev->what = PROC_EVENT_NONE; ev->event_data.ack.err = err; 
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c index 856ad80418ae..7c03dd84f66a 100644 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ b/drivers/cpufreq/at32ap-cpufreq.c @@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) return 0; } -static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy) +static int at32_cpufreq_driver_init(struct cpufreq_policy *policy) { unsigned int frequency, rate, min_freq; int retval, steps, i; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 218460fcd2e4..25a70d06c5bf 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -68,6 +68,9 @@ static void cs_check_cpu(int cpu, unsigned int load) dbs_info->requested_freq += get_freq_target(cs_tuners, policy); + if (dbs_info->requested_freq > policy->max) + dbs_info->requested_freq = policy->max; + __cpufreq_driver_target(policy, dbs_info->requested_freq, CPUFREQ_RELATION_H); return; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 0806c31e5764..e6be63561fa6 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -328,10 +328,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_data->cdata->gov_dbs_timer); } - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index f2c75065ce19..dfd1643b0b2f 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -157,4 +157,3 @@ err_moutcore: pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos4210_cpufreq_init); diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 8683304ce62c..efad5e657f6f 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c @@ -211,4 +211,3 @@ err_moutcore: pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos4x12_cpufreq_init); diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index 9fae466d7746..8feda86fe42c 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c @@ -236,4 +236,3 @@ err_moutcore: pr_err("%s: failed initialization\n", __func__); return -EINVAL; } -EXPORT_SYMBOL(exynos5250_cpufreq_init); diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index be6d14307aa8..a0acd0bfba40 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -53,6 +53,7 @@ static unsigned int omap_getspeed(unsigned int cpu) static int omap_target(struct cpufreq_policy *policy, unsigned int index) { + int r, ret; struct dev_pm_opp *opp; unsigned long freq, volt = 0, volt_old = 0, tol = 0; unsigned int old_freq, new_freq; diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c index f42df7ec03c5..b7309c37033d 100644 --- a/drivers/cpufreq/tegra-cpufreq.c +++ b/drivers/cpufreq/tegra-cpufreq.c @@ -142,10 +142,8 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index) mutex_lock(&tegra_cpu_lock); - if (is_suspended) { - ret = -EBUSY; + if (is_suspended) goto out; - } freq = freq_table[index].frequency; diff 
--git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 2a991e468f78..a55e68f2cfc8 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -400,7 +400,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); */ void cpuidle_unregister_device(struct cpuidle_device *dev) { - if (dev->registered == 0) + if (!dev || dev->registered == 0) return; cpuidle_pause_and_lock(); diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index ca89f6b84b06..e7555ff4cafd 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM help Enables the driver module for Freescale's Cryptographic Accelerator and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). - This module adds a job ring operation interface, and configures h/w + This module creates job ring devices, and configures h/w to operate as a DPAA component automatically, depending on h/w feature availability. To compile this driver as a module, choose M here: the module will be called caam. +config CRYPTO_DEV_FSL_CAAM_JR + tristate "Freescale CAAM Job Ring driver backend" + depends on CRYPTO_DEV_FSL_CAAM + default y + help + Enables the driver module for Job Rings which are part of + Freescale's Cryptographic Accelerator + and Assurance Module (CAAM). This module adds a job ring operation + interface. + + To compile this driver as a module, choose M here: the module + will be called caam_jr. + config CRYPTO_DEV_FSL_CAAM_RINGSIZE int "Job Ring size" - depends on CRYPTO_DEV_FSL_CAAM + depends on CRYPTO_DEV_FSL_CAAM_JR range 2 9 default "9" help @@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE config CRYPTO_DEV_FSL_CAAM_INTC bool "Job Ring interrupt coalescing" - depends on CRYPTO_DEV_FSL_CAAM + depends on CRYPTO_DEV_FSL_CAAM_JR default n help Enable the Job Ring's interrupt coalescing feature. 
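The Kconfig changes above introduce a separate job-ring backend (CRYPTO_DEV_FSL_CAAM_JR) next to the CAAM controller driver, and the caamalg/caamhash/caamrng front-ends reworked later in this patch stop reaching into the controller's private data and instead borrow a ring from that backend, which hands out the least-loaded ring by tfm_count. The fragment below is a rough sketch of the resulting consumer pattern, using only the caam_jr_alloc()/caam_jr_enqueue()/caam_jr_free() interfaces added in jr.c/jr.h further down; the context structure, callback body and my_* helper names are illustrative, not part of the patch.

/*
 * Hedged sketch of a CAAM front-end using the new job-ring backend.
 * Only caam_jr_alloc(), caam_jr_enqueue() and caam_jr_free() come from
 * this series; everything else here is made up for illustration.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/err.h>
#include "jr.h"

struct my_ctx {
	struct device *jrdev;		/* job ring assigned to this transform */
};

static void my_done(struct device *jrdev, u32 *desc, u32 status, void *areq)
{
	if (status)
		dev_err(jrdev, "job failed: 0x%08x\n", status);
	/* complete the request passed in areq here */
}

static int my_ctx_init(struct my_ctx *ctx)
{
	/* backend picks the ring with the fewest active transforms */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev))
		return PTR_ERR(ctx->jrdev);
	return 0;
}

static int my_submit(struct my_ctx *ctx, u32 *desc, void *req)
{
	/* -EBUSY if the ring is full, -EIO if the descriptor can't be mapped */
	return caam_jr_enqueue(ctx->jrdev, desc, my_done, req);
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	caam_jr_free(ctx->jrdev);	/* drops the ring's tfm_count reference */
}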
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD config CRYPTO_DEV_FSL_CAAM_CRYPTO_API tristate "Register algorithm implementations with the Crypto API" - depends on CRYPTO_DEV_FSL_CAAM + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR default y select CRYPTO_ALGAPI select CRYPTO_AUTHENC @@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API config CRYPTO_DEV_FSL_CAAM_AHASH_API tristate "Register hash algorithm implementations with Crypto API" - depends on CRYPTO_DEV_FSL_CAAM + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR default y select CRYPTO_HASH help @@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API config CRYPTO_DEV_FSL_CAAM_RNG_API tristate "Register caam device for hwrng API" - depends on CRYPTO_DEV_FSL_CAAM + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR default y select CRYPTO_RNG select HW_RANDOM diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index d56bd0ec65d8..550758a333e7 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile @@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) endif obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o -caam-objs := ctrl.o jr.o error.o key_gen.o +caam-objs := ctrl.o +caam_jr-objs := jr.o key_gen.o error.o diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 7c63b72ecd75..4cf5dec826e1 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -86,6 +86,7 @@ #else #define debug(format, arg...) #endif +static struct list_head alg_list; /* Set DK bit in class 1 operation if shared */ static inline void append_dec_op1(u32 *desc, u32 type) @@ -817,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ivsize, 1); print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), - req->cryptlen, 1); + req->cryptlen - ctx->authsize, 1); #endif if (err) { @@ -971,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, (edesc->src_nents ? 
: 1); in_options = LDST_SGF; } - if (encrypt) - append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + - req->cryptlen - authsize, in_options); - else - append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + - req->cryptlen, in_options); + + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, + in_options); if (likely(req->src == req->dst)) { if (all_contig) { @@ -997,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, } } if (encrypt) - append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); + append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, + out_options); else append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, out_options); @@ -1047,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; in_options = LDST_SGF; } - append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + - req->cryptlen - authsize, in_options); + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, + in_options); if (contig & GIV_DST_CONTIG) { dst_dma = edesc->iv_dma; @@ -1065,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, } } - append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); + append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, + out_options); } /* @@ -1129,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, * allocate and map the aead extended descriptor */ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, - int desc_bytes, bool *all_contig_ptr) + int desc_bytes, bool *all_contig_ptr, + bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -1144,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool assoc_chained = false, src_chained = false, dst_chained = false; int ivsize = crypto_aead_ivsize(aead); int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); - src_nents = sg_count(req->src, req->cryptlen, &src_chained); - if (unlikely(req->dst != req->src)) - dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); + if (unlikely(req->dst != req->src)) { + src_nents = sg_count(req->src, req->cryptlen, &src_chained); + dst_nents = sg_count(req->dst, + req->cryptlen + + (encrypt ? authsize : (-authsize)), + &dst_chained); + } else { + src_nents = sg_count(req->src, + req->cryptlen + + (encrypt ? authsize : 0), + &src_chained); + } sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? 
: 1, DMA_TO_DEVICE, assoc_chained); @@ -1233,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req) u32 *desc; int ret = 0; - req->cryptlen += ctx->authsize; - /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig); + CAAM_CMD_SZ, &all_contig, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1274,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req) /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig); + CAAM_CMD_SZ, &all_contig, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1331,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request src_nents = sg_count(req->src, req->cryptlen, &src_chained); if (unlikely(req->dst != req->src)) - dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); + dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, + &dst_chained); sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, DMA_TO_DEVICE, assoc_chained); @@ -1425,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) u32 *desc; int ret = 0; - req->cryptlen += ctx->authsize; - /* allocate extended descriptor */ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * CAAM_CMD_SZ, &contig); @@ -2057,7 +2065,6 @@ static struct caam_alg_template driver_algs[] = { struct caam_crypto_alg { struct list_head entry; - struct device *ctrldev; int class1_alg_type; int class2_alg_type; int alg_op; @@ -2070,14 +2077,12 @@ static int caam_cra_init(struct crypto_tfm *tfm) struct caam_crypto_alg *caam_alg = container_of(alg, struct caam_crypto_alg, crypto_alg); struct caam_ctx *ctx = crypto_tfm_ctx(tfm); - struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); - int tgt_jr = atomic_inc_return(&priv->tfm_count); - /* - * distribute tfms across job rings to ensure in-order - * crypto request processing per tfm - */ - ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } /* copy descriptor header template value */ ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; @@ -2104,44 +2109,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm) dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, desc_bytes(ctx->sh_desc_givenc), DMA_TO_DEVICE); + + caam_jr_free(ctx->jrdev); } static void __exit caam_algapi_exit(void) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; struct caam_crypto_alg *t_alg, *n; - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return; - } - - pdev = of_find_device_by_node(dev_node); - if (!pdev) - return; - - ctrldev = &pdev->dev; - of_node_put(dev_node); - priv = dev_get_drvdata(ctrldev); - - if (!priv->alg_list.next) + if (!alg_list.next) return; - list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { + list_for_each_entry_safe(t_alg, n, &alg_list, entry) { crypto_unregister_alg(&t_alg->crypto_alg); list_del(&t_alg->entry); kfree(t_alg); } } -static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, - struct caam_alg_template +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template *template) { struct caam_crypto_alg *t_alg; @@ -2149,7 +2136,7 @@ static struct caam_crypto_alg 
*caam_alg_alloc(struct device *ctrldev, t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); if (!t_alg) { - dev_err(ctrldev, "failed to allocate t_alg\n"); + pr_err("failed to allocate t_alg\n"); return ERR_PTR(-ENOMEM); } @@ -2181,62 +2168,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, t_alg->class1_alg_type = template->class1_alg_type; t_alg->class2_alg_type = template->class2_alg_type; t_alg->alg_op = template->alg_op; - t_alg->ctrldev = ctrldev; return t_alg; } static int __init caam_algapi_init(void) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; int i = 0, err = 0; - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return -ENODEV; - } - - pdev = of_find_device_by_node(dev_node); - if (!pdev) - return -ENODEV; - - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); - of_node_put(dev_node); - - INIT_LIST_HEAD(&priv->alg_list); - - atomic_set(&priv->tfm_count, -1); + INIT_LIST_HEAD(&alg_list); /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { /* TODO: check if h/w supports alg */ struct caam_crypto_alg *t_alg; - t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); + t_alg = caam_alg_alloc(&driver_algs[i]); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - dev_warn(ctrldev, "%s alg allocation failed\n", - driver_algs[i].driver_name); + pr_warn("%s alg allocation failed\n", + driver_algs[i].driver_name); continue; } err = crypto_register_alg(&t_alg->crypto_alg); if (err) { - dev_warn(ctrldev, "%s alg registration failed\n", + pr_warn("%s alg registration failed\n", t_alg->crypto_alg.cra_driver_name); kfree(t_alg); } else - list_add_tail(&t_alg->entry, &priv->alg_list); + list_add_tail(&t_alg->entry, &alg_list); } - if (!list_empty(&priv->alg_list)) - dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", - (char *)of_get_property(dev_node, "compatible", NULL)); + if (!list_empty(&alg_list)) + pr_info("caam algorithms registered in /proc/crypto\n"); return err; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index e732bd962e98..0378328f47a7 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -94,6 +94,9 @@ #define debug(format, arg...) 
#endif + +static struct list_head hash_list; + /* ahash per-session context */ struct caam_hash_ctx { struct device *jrdev; @@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = { struct caam_hash_alg { struct list_head entry; - struct device *ctrldev; int alg_type; int alg_op; struct ahash_alg ahash_alg; @@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) struct caam_hash_alg *caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg); struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); - struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, HASH_MSG_LEN + SHA1_DIGEST_SIZE, @@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) HASH_MSG_LEN + SHA256_DIGEST_SIZE, HASH_MSG_LEN + 64, HASH_MSG_LEN + SHA512_DIGEST_SIZE }; - int tgt_jr = atomic_inc_return(&priv->tfm_count); int ret = 0; /* - * distribute tfms across job rings to ensure in-order + * Get a Job ring from Job Ring driver to ensure in-order * crypto request processing per tfm */ - ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; - + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } /* copy descriptor header template value */ ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; @@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); + + caam_jr_free(ctx->jrdev); } static void __exit caam_algapi_hash_exit(void) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; struct caam_hash_alg *t_alg, *n; - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return; - } - - pdev = of_find_device_by_node(dev_node); - if (!pdev) + if (!hash_list.next) return; - ctrldev = &pdev->dev; - of_node_put(dev_node); - priv = dev_get_drvdata(ctrldev); - - if (!priv->hash_list.next) - return; - - list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) { + list_for_each_entry_safe(t_alg, n, &hash_list, entry) { crypto_unregister_ahash(&t_alg->ahash_alg); list_del(&t_alg->entry); kfree(t_alg); @@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void) } static struct caam_hash_alg * -caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, +caam_hash_alloc(struct caam_hash_template *template, bool keyed) { struct caam_hash_alg *t_alg; @@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); if (!t_alg) { - dev_err(ctrldev, "failed to allocate t_alg\n"); + pr_err("failed to allocate t_alg\n"); return ERR_PTR(-ENOMEM); } @@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, t_alg->alg_type = template->alg_type; t_alg->alg_op = template->alg_op; - t_alg->ctrldev = ctrldev; return t_alg; } static int __init caam_algapi_hash_init(void) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private 
*priv; int i = 0, err = 0; - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return -ENODEV; - } - - pdev = of_find_device_by_node(dev_node); - if (!pdev) - return -ENODEV; - - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); - of_node_put(dev_node); - - INIT_LIST_HEAD(&priv->hash_list); - - atomic_set(&priv->tfm_count, -1); + INIT_LIST_HEAD(&hash_list); /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { @@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void) struct caam_hash_alg *t_alg; /* register hmac version */ - t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); + t_alg = caam_hash_alloc(&driver_hash[i], true); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - dev_warn(ctrldev, "%s alg allocation failed\n", - driver_hash[i].driver_name); + pr_warn("%s alg allocation failed\n", + driver_hash[i].driver_name); continue; } err = crypto_register_ahash(&t_alg->ahash_alg); if (err) { - dev_warn(ctrldev, "%s alg registration failed\n", + pr_warn("%s alg registration failed\n", t_alg->ahash_alg.halg.base.cra_driver_name); kfree(t_alg); } else - list_add_tail(&t_alg->entry, &priv->hash_list); + list_add_tail(&t_alg->entry, &hash_list); /* register unkeyed version */ - t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); + t_alg = caam_hash_alloc(&driver_hash[i], false); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - dev_warn(ctrldev, "%s alg allocation failed\n", - driver_hash[i].driver_name); + pr_warn("%s alg allocation failed\n", + driver_hash[i].driver_name); continue; } err = crypto_register_ahash(&t_alg->ahash_alg); if (err) { - dev_warn(ctrldev, "%s alg registration failed\n", + pr_warn("%s alg registration failed\n", t_alg->ahash_alg.halg.base.cra_driver_name); kfree(t_alg); } else - list_add_tail(&t_alg->entry, &priv->hash_list); + list_add_tail(&t_alg->entry, &hash_list); } return err; diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index d1939a9539c0..28486b19fc36 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -273,34 +273,23 @@ static struct hwrng caam_rng = { static void __exit caam_rng_exit(void) { + caam_jr_free(rng_ctx.jrdev); hwrng_unregister(&caam_rng); } static int __init caam_rng_init(void) { - struct device_node *dev_node; - struct platform_device *pdev; - struct device *ctrldev; - struct caam_drv_private *priv; - - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); - if (!dev_node) { - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); - if (!dev_node) - return -ENODEV; - } - - pdev = of_find_device_by_node(dev_node); - if (!pdev) - return -ENODEV; + struct device *dev; - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); - of_node_put(dev_node); + dev = caam_jr_alloc(); + if (IS_ERR(dev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(dev); + } - caam_init_rng(&rng_ctx, priv->jrdev[0]); + caam_init_rng(&rng_ctx, dev); - dev_info(priv->jrdev[0], "registering rng-caam\n"); + dev_info(dev, "registering rng-caam\n"); return hwrng_register(&caam_rng); } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index bc6d820812b6..63fb1af2c431 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -16,82 +16,75 @@ #include "error.h" #include "ctrl.h" -static int caam_remove(struct platform_device *pdev) -{ - struct device *ctrldev; 
- struct caam_drv_private *ctrlpriv; - struct caam_drv_private_jr *jrpriv; - struct caam_full __iomem *topregs; - int ring, ret = 0; - - ctrldev = &pdev->dev; - ctrlpriv = dev_get_drvdata(ctrldev); - topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; - - /* shut down JobRs */ - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { - ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]); - jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); - irq_dispose_mapping(jrpriv->irq); - } - - /* Shut down debug views */ -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(ctrlpriv->dfs_root); -#endif - - /* Unmap controller region */ - iounmap(&topregs->ctrl); - - kfree(ctrlpriv->jrdev); - kfree(ctrlpriv); - - return ret; -} - /* * Descriptor to instantiate RNG State Handle 0 in normal mode and * load the JDKEK, TDKEK and TDSK registers */ -static void build_instantiation_desc(u32 *desc) +static void build_instantiation_desc(u32 *desc, int handle, int do_sk) { - u32 *jump_cmd; + u32 *jump_cmd, op_flags; init_job_desc(desc, 0); + op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT; + /* INIT RNG in non-test mode */ - append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | - OP_ALG_AS_INIT); + append_operation(desc, op_flags); + + if (!handle && do_sk) { + /* + * For SH0, Secure Keys must be generated as well + */ + + /* wait for done */ + jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); + set_jump_tgt_here(desc, jump_cmd); + + /* + * load 1 to clear written reg: + * resets the done interrrupt and returns the RNG to idle. + */ + append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); + + /* Initialize State Handle */ + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + OP_ALG_AAI_RNG4_SK); + } - /* wait for done */ - jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); - set_jump_tgt_here(desc, jump_cmd); + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); +} - /* - * load 1 to clear written reg: - * resets the done interrupt and returns the RNG to idle. - */ - append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); +/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */ +static void build_deinstantiation_desc(u32 *desc, int handle) +{ + init_job_desc(desc, 0); - /* generate secure keys (non-test) */ + /* Uninstantiate State Handle 0 */ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | - OP_ALG_RNG4_SK); + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); + + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); } -static int instantiate_rng(struct device *ctrldev) +/* + * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of + * the software (no JR/QI used). 
+ * @ctrldev - pointer to device + * @status - descriptor status, after being run + * + * Return: - 0 if no error occurred + * - -ENODEV if the DECO couldn't be acquired + * - -EAGAIN if an error occurred while executing the descriptor + */ +static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, + u32 *status) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); struct caam_full __iomem *topregs; unsigned int timeout = 100000; - u32 *desc; - int i, ret = 0; - - desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); - if (!desc) { - dev_err(ctrldev, "can't allocate RNG init descriptor memory\n"); - return -ENOMEM; - } - build_instantiation_desc(desc); + u32 deco_dbg_reg, flags; + int i; /* Set the bit to request direct access to DECO0 */ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; @@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev) if (!timeout) { dev_err(ctrldev, "failed to acquire DECO 0\n"); - ret = -EIO; - goto out; + clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); + return -ENODEV; } for (i = 0; i < desc_len(desc); i++) - topregs->deco.descbuf[i] = *(desc + i); + wr_reg32(&topregs->deco.descbuf[i], *(desc + i)); + + flags = DECO_JQCR_WHL; + /* + * If the descriptor length is longer than 4 words, then the + * FOUR bit in JRCTRL register must be set. + */ + if (desc_len(desc) >= 4) + flags |= DECO_JQCR_FOUR; - wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); + /* Instruct the DECO to execute it */ + wr_reg32(&topregs->deco.jr_ctl_hi, flags); timeout = 10000000; - while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && - --timeout) + do { + deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg); + /* + * If an error occured in the descriptor, then + * the DECO status field will be set to 0x0D + */ + if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == + DESC_DBG_DECO_STAT_HOST_ERR) + break; cpu_relax(); + } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); - if (!timeout) { - dev_err(ctrldev, "failed to instantiate RNG\n"); - ret = -EIO; - } + *status = rd_reg32(&topregs->deco.op_status_hi) & + DECO_OP_STATUS_HI_ERR_MASK; + /* Mark the DECO as free */ clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); -out: + + if (!timeout) + return -EAGAIN; + + return 0; +} + +/* + * instantiate_rng - builds and executes a descriptor on DECO0, + * which initializes the RNG block. + * @ctrldev - pointer to device + * @state_handle_mask - bitmask containing the instantiation status + * for the RNG4 state handles which exist in + * the RNG4 block: 1 if it's been instantiated + * by an external entry, 0 otherwise. + * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; + * Caution: this can be done only once; if the keys need to be + * regenerated, a POR is required + * + * Return: - 0 if no error occurred + * - -ENOMEM if there isn't enough memory to allocate the descriptor + * - -ENODEV if DECO0 couldn't be acquired + * - -EAGAIN if an error occurred when executing the descriptor + * f.i. there was a RNG hardware error due to not "good enough" + * entropy being aquired. 
+ */ +static int instantiate_rng(struct device *ctrldev, int state_handle_mask, + int gen_sk) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_full __iomem *topregs; + struct rng4tst __iomem *r4tst; + u32 *desc, status, rdsta_val; + int ret = 0, sh_idx; + + topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; + r4tst = &topregs->ctrl.r4tst[0]; + + desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { + /* + * If the corresponding bit is set, this state handle + * was initialized by somebody else, so it's left alone. + */ + if ((1 << sh_idx) & state_handle_mask) + continue; + + /* Create the descriptor for instantiating RNG State Handle */ + build_instantiation_desc(desc, sh_idx, gen_sk); + + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + + /* + * If ret is not 0, or descriptor status is not 0, then + * something went wrong. No need to try the next state + * handle (if available), bail out here. + * Also, if for some reason, the State Handle didn't get + * instantiated although the descriptor has finished + * without any error (HW optimizations for later + * CAAM eras), then try again. + */ + rdsta_val = + rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK; + if (status || !(rdsta_val & (1 << sh_idx))) + ret = -EAGAIN; + if (ret) + break; + + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); + /* Clear the contents before recreating the descriptor */ + memset(desc, 0x00, CAAM_CMD_SZ * 7); + } + kfree(desc); + return ret; } /* - * By default, the TRNG runs for 200 clocks per sample; - * 1600 clocks per sample generates better entropy. + * deinstantiate_rng - builds and executes a descriptor on DECO0, + * which deinitializes the RNG block. 
+ * @ctrldev - pointer to device + * @state_handle_mask - bitmask containing the instantiation status + * for the RNG4 state handles which exist in + * the RNG4 block: 1 if it's been instantiated + * + * Return: - 0 if no error occurred + * - -ENOMEM if there isn't enough memory to allocate the descriptor + * - -ENODEV if DECO0 couldn't be acquired + * - -EAGAIN if an error occurred when executing the descriptor */ -static void kick_trng(struct platform_device *pdev) +static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) +{ + u32 *desc, status; + int sh_idx, ret = 0; + + desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { + /* + * If the corresponding bit is set, then it means the state + * handle was initialized by us, and thus it needs to be + * deintialized as well + */ + if ((1 << sh_idx) & state_handle_mask) { + /* + * Create the descriptor for deinstantating this state + * handle + */ + build_deinstantiation_desc(desc, sh_idx); + + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + + if (ret || status) { + dev_err(ctrldev, + "Failed to deinstantiate RNG4 SH%d\n", + sh_idx); + break; + } + dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); + } + } + + kfree(desc); + + return ret; +} + +static int caam_remove(struct platform_device *pdev) +{ + struct device *ctrldev; + struct caam_drv_private *ctrlpriv; + struct caam_full __iomem *topregs; + int ring, ret = 0; + + ctrldev = &pdev->dev; + ctrlpriv = dev_get_drvdata(ctrldev); + topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; + + /* Remove platform devices for JobRs */ + for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { + if (ctrlpriv->jrpdev[ring]) + of_device_unregister(ctrlpriv->jrpdev[ring]); + } + + /* De-initialize RNG state handles initialized by this driver. */ + if (ctrlpriv->rng4_sh_init) + deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); + + /* Shut down debug views */ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(ctrlpriv->dfs_root); +#endif + + /* Unmap controller region */ + iounmap(&topregs->ctrl); + + kfree(ctrlpriv->jrpdev); + kfree(ctrlpriv); + + return ret; +} + +/* + * kick_trng - sets the various parameters for enabling the initialization + * of the RNG4 block in CAAM + * @pdev - pointer to the platform device + * @ent_delay - Defines the length (in system clocks) of each entropy sample. + */ +static void kick_trng(struct platform_device *pdev, int ent_delay) { struct device *ctrldev = &pdev->dev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); @@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev) /* put RNG4 into program mode */ setbits32(&r4tst->rtmctl, RTMCTL_PRGM); - /* 1600 clocks per sample */ + + /* + * Performance-wise, it does not make sense to + * set the delay to a value that is lower + * than the last one that worked (i.e. the state handles + * were instantiated properly. Thus, instead of wasting + * time trying to set the values controlling the sample + * frequency, the function simply returns. 
+ */ + val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) + >> RTSDCTL_ENT_DLY_SHIFT; + if (ent_delay <= val) { + /* put RNG4 into run mode */ + clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); + return; + } + val = rd_reg32(&r4tst->rtsdctl); - val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); + val = (val & ~RTSDCTL_ENT_DLY_MASK) | + (ent_delay << RTSDCTL_ENT_DLY_SHIFT); wr_reg32(&r4tst->rtsdctl, val); - /* min. freq. count */ - wr_reg32(&r4tst->rtfrqmin, 400); - /* max. freq. count */ - wr_reg32(&r4tst->rtfrqmax, 6400); + /* min. freq. count, equal to 1/4 of the entropy sample length */ + wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); + /* max. freq. count, equal to 8 times the entropy sample length */ + wr_reg32(&r4tst->rtfrqmax, ent_delay << 3); /* put RNG4 into run mode */ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); } @@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era); /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { - int ret, ring, rspec; + int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; struct device *dev; struct device_node *nprop, *np; @@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev) rspec++; } - ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); - if (ctrlpriv->jrdev == NULL) { + ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec, + GFP_KERNEL); + if (ctrlpriv->jrpdev == NULL) { iounmap(&topregs->ctrl); return -ENOMEM; } @@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev) ring = 0; ctrlpriv->total_jobrs = 0; for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { - caam_jr_probe(pdev, np, ring); + ctrlpriv->jrpdev[ring] = + of_platform_device_create(np, NULL, dev); + if (!ctrlpriv->jrpdev[ring]) { + pr_warn("JR%d Platform device creation error\n", ring); + continue; + } ctrlpriv->total_jobrs++; ring++; } if (!ring) { for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { - caam_jr_probe(pdev, np, ring); + ctrlpriv->jrpdev[ring] = + of_platform_device_create(np, NULL, dev); + if (!ctrlpriv->jrpdev[ring]) { + pr_warn("JR%d Platform device creation error\n", + ring); + continue; + } ctrlpriv->total_jobrs++; ring++; } @@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev) /* * If SEC has RNG version >= 4 and RNG state handle has not been - * already instantiated ,do RNG instantiation + * already instantiated, do RNG instantiation */ - if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && - !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { - kick_trng(pdev); - ret = instantiate_rng(dev); + if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) { + ctrlpriv->rng4_sh_init = + rd_reg32(&topregs->ctrl.r4tst[0].rdsta); + /* + * If the secure keys (TDKEK, JDKEK, TDSK), were already + * generated, signal this to the function that is instantiating + * the state handles. An error would occur if RNG4 attempts + * to regenerate these keys before the next POR. + */ + gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; + ctrlpriv->rng4_sh_init &= RDSTA_IFMASK; + do { + int inst_handles = + rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & + RDSTA_IFMASK; + /* + * If either SH were instantiated by somebody else + * (e.g. u-boot) then it is assumed that the entropy + * parameters are properly set and thus the function + * setting these (kick_trng(...)) is skipped. + * Also, if a handle was instantiated, do not change + * the TRNG parameters. 
+ */ + if (!(ctrlpriv->rng4_sh_init || inst_handles)) { + kick_trng(pdev, ent_delay); + ent_delay += 400; + } + /* + * if instantiate_rng(...) fails, the loop will rerun + * and the kick_trng(...) function will modfiy the + * upper and lower limits of the entropy sampling + * interval, leading to a sucessful initialization of + * the RNG. + */ + ret = instantiate_rng(dev, inst_handles, + gen_sk); + } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); if (ret) { + dev_err(dev, "failed to instantiate RNG"); caam_remove(pdev); return ret; } + /* + * Set handles init'ed by this module as the complement of the + * already initialized ones + */ + ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; /* Enable RDB bit so that RNG works faster */ setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 53b296f78b0d..7e4500f18df6 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -1155,8 +1155,15 @@ struct sec4_sg_entry { /* randomizer AAI set */ #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) -#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) -#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT) + +/* RNG4 AAI set */ +#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) /* hmac/smac AAI set */ #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) @@ -1178,12 +1185,6 @@ struct sec4_sg_entry { #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) -/* RNG4 set */ -#define OP_ALG_RNG4_SHIFT 4 -#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT) - -#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT) - #define OP_ALG_AS_SHIFT 2 #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 34c4b9f7fbfa..6d85fcc5bd0a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -37,13 +37,16 @@ struct caam_jrentry_info { /* Private sub-storage for a single JobR */ struct caam_drv_private_jr { - struct device *parentdev; /* points back to controller dev */ - struct platform_device *jr_pdev;/* points to platform device for JR */ + struct list_head list_node; /* Job Ring device list */ + struct device *dev; int ridx; struct caam_job_ring __iomem *rregs; /* JobR's register space */ struct tasklet_struct irqtask; int irq; /* One per queue */ + /* Number of scatterlist crypt transforms active on the JobR */ + atomic_t tfm_count ____cacheline_aligned; + /* Job ring info */ int ringsize; /* Size of rings (assume input = output) */ struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ @@ -63,7 +66,7 @@ struct caam_drv_private_jr { struct caam_drv_private { struct device *dev; - struct device **jrdev; /* Alloc'ed array per sub-device */ + struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ struct platform_device *pdev; /* Physical-presence section */ @@ -80,12 +83,11 @@ struct caam_drv_private { u8 qi_present; /* Nonzero if QI present in device */ int secvio_irq; /* Security violation interrupt number */ - /* which jr allocated to scatterlist crypto */ - 
atomic_t tfm_count ____cacheline_aligned; - /* list of registered crypto algorithms (mk generic context handle?) */ - struct list_head alg_list; - /* list of registered hash algorithms (mk generic context handle?) */ - struct list_head hash_list; +#define RNG4_MAX_HANDLES 2 + /* RNG4 block */ + u32 rng4_sh_init; /* This bitmap shows which of the State + Handles of the RNG4 block are initialized + by this driver */ /* * debugfs entries for developer view into driver/device diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index bdb786d5a5e5..1d80bd3636c5 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -6,6 +6,7 @@ */ #include <linux/of_irq.h> +#include <linux/of_address.h> #include "compat.h" #include "regs.h" @@ -13,6 +14,113 @@ #include "desc.h" #include "intern.h" +struct jr_driver_data { + /* List of Physical JobR's with the Driver */ + struct list_head jr_list; + spinlock_t jr_alloc_lock; /* jr_list lock */ +} ____cacheline_aligned; + +static struct jr_driver_data driver_data; + +static int caam_reset_hw_jr(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + unsigned int timeout = 100000; + + /* + * mask interrupts since we are going to poll + * for reset completion status + */ + setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + + /* initiate flush (required prior to reset) */ + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); + while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == + JRINT_ERR_HALT_INPROGRESS) && --timeout) + cpu_relax(); + + if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != + JRINT_ERR_HALT_COMPLETE || timeout == 0) { + dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); + return -EIO; + } + + /* initiate reset */ + timeout = 100000; + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); + while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) + cpu_relax(); + + if (timeout == 0) { + dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); + return -EIO; + } + + /* unmask interrupts */ + clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + + return 0; +} + +/* + * Shutdown JobR independent of platform property code + */ +int caam_jr_shutdown(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + dma_addr_t inpbusaddr, outbusaddr; + int ret; + + ret = caam_reset_hw_jr(dev); + + tasklet_kill(&jrp->irqtask); + + /* Release interrupt */ + free_irq(jrp->irq, dev); + + /* Free rings */ + inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); + outbusaddr = rd_reg64(&jrp->rregs->outring_base); + dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, + jrp->inpring, inpbusaddr); + dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, + jrp->outring, outbusaddr); + kfree(jrp->entinfo); + + return ret; +} + +static int caam_jr_remove(struct platform_device *pdev) +{ + int ret; + struct device *jrdev; + struct caam_drv_private_jr *jrpriv; + + jrdev = &pdev->dev; + jrpriv = dev_get_drvdata(jrdev); + + /* + * Return EBUSY if job ring already allocated. 
+ */ + if (atomic_read(&jrpriv->tfm_count)) { + dev_err(jrdev, "Device is busy\n"); + return -EBUSY; + } + + /* Remove the node from Physical JobR list maintained by driver */ + spin_lock(&driver_data.jr_alloc_lock); + list_del(&jrpriv->list_node); + spin_unlock(&driver_data.jr_alloc_lock); + + /* Release ring */ + ret = caam_jr_shutdown(jrdev); + if (ret) + dev_err(jrdev, "Failed to shut down job ring\n"); + irq_dispose_mapping(jrpriv->irq); + + return ret; +} + /* Main per-ring interrupt handler */ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) { @@ -128,6 +236,59 @@ static void caam_jr_dequeue(unsigned long devarg) } /** + * caam_jr_alloc() - Alloc a job ring for someone to use as needed. + * + * returns : pointer to the newly allocated physical + * JobR dev can be written to if successful. + **/ +struct device *caam_jr_alloc(void) +{ + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; + struct device *dev = NULL; + int min_tfm_cnt = INT_MAX; + int tfm_cnt; + + spin_lock(&driver_data.jr_alloc_lock); + + if (list_empty(&driver_data.jr_list)) { + spin_unlock(&driver_data.jr_alloc_lock); + return ERR_PTR(-ENODEV); + } + + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { + tfm_cnt = atomic_read(&jrpriv->tfm_count); + if (tfm_cnt < min_tfm_cnt) { + min_tfm_cnt = tfm_cnt; + min_jrpriv = jrpriv; + } + if (!min_tfm_cnt) + break; + } + + if (min_jrpriv) { + atomic_inc(&min_jrpriv->tfm_count); + dev = min_jrpriv->dev; + } + spin_unlock(&driver_data.jr_alloc_lock); + + return dev; +} +EXPORT_SYMBOL(caam_jr_alloc); + +/** + * caam_jr_free() - Free the Job Ring + * @rdev - points to the dev that identifies the Job ring to + * be released. + **/ +void caam_jr_free(struct device *rdev) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); + + atomic_dec(&jrpriv->tfm_count); +} +EXPORT_SYMBOL(caam_jr_free); + +/** * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, * -EBUSY if the queue is full, -EIO if it cannot map the caller's * descriptor. @@ -207,46 +368,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, } EXPORT_SYMBOL(caam_jr_enqueue); -static int caam_reset_hw_jr(struct device *dev) -{ - struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); - unsigned int timeout = 100000; - - /* - * mask interrupts since we are going to poll - * for reset completion status - */ - setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); - - /* initiate flush (required prior to reset) */ - wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); - while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == - JRINT_ERR_HALT_INPROGRESS) && --timeout) - cpu_relax(); - - if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != - JRINT_ERR_HALT_COMPLETE || timeout == 0) { - dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); - return -EIO; - } - - /* initiate reset */ - timeout = 100000; - wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); - while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) - cpu_relax(); - - if (timeout == 0) { - dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); - return -EIO; - } - - /* unmask interrupts */ - clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); - - return 0; -} - /* * Init JobR independent of platform property detection */ @@ -262,7 +383,7 @@ static int caam_jr_init(struct device *dev) /* Connect job ring interrupt handler. 
*/ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, - "caam-jobr", dev); + dev_name(dev), dev); if (error) { dev_err(dev, "can't connect JobR %d interrupt (%d)\n", jrp->ridx, jrp->irq); @@ -318,86 +439,43 @@ static int caam_jr_init(struct device *dev) return 0; } -/* - * Shutdown JobR independent of platform property code - */ -int caam_jr_shutdown(struct device *dev) -{ - struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); - dma_addr_t inpbusaddr, outbusaddr; - int ret; - - ret = caam_reset_hw_jr(dev); - - tasklet_kill(&jrp->irqtask); - - /* Release interrupt */ - free_irq(jrp->irq, dev); - - /* Free rings */ - inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); - outbusaddr = rd_reg64(&jrp->rregs->outring_base); - dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, - jrp->inpring, inpbusaddr); - dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, - jrp->outring, outbusaddr); - kfree(jrp->entinfo); - of_device_unregister(jrp->jr_pdev); - - return ret; -} /* - * Probe routine for each detected JobR subsystem. It assumes that - * property detection was picked up externally. + * Probe routine for each detected JobR subsystem. */ -int caam_jr_probe(struct platform_device *pdev, struct device_node *np, - int ring) +static int caam_jr_probe(struct platform_device *pdev) { - struct device *ctrldev, *jrdev; - struct platform_device *jr_pdev; - struct caam_drv_private *ctrlpriv; + struct device *jrdev; + struct device_node *nprop; + struct caam_job_ring __iomem *ctrl; struct caam_drv_private_jr *jrpriv; - u32 *jroffset; + static int total_jobrs; int error; - ctrldev = &pdev->dev; - ctrlpriv = dev_get_drvdata(ctrldev); - + jrdev = &pdev->dev; jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), GFP_KERNEL); - if (jrpriv == NULL) { - dev_err(ctrldev, "can't alloc private mem for job ring %d\n", - ring); + if (!jrpriv) return -ENOMEM; - } - jrpriv->parentdev = ctrldev; /* point back to parent */ - jrpriv->ridx = ring; /* save ring identity relative to detection */ - /* - * Derive a pointer to the detected JobRs regs - * Driver has already iomapped the entire space, we just - * need to add in the offset to this JobR. 
Don't know if I - * like this long-term, but it'll run - */ - jroffset = (u32 *)of_get_property(np, "reg", NULL); - jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl - + *jroffset); + dev_set_drvdata(jrdev, jrpriv); - /* Build a local dev for each detected queue */ - jr_pdev = of_platform_device_create(np, NULL, ctrldev); - if (jr_pdev == NULL) { - kfree(jrpriv); - return -EINVAL; + /* save ring identity relative to detection */ + jrpriv->ridx = total_jobrs++; + + nprop = pdev->dev.of_node; + /* Get configuration properties from device tree */ + /* First, get register page */ + ctrl = of_iomap(nprop, 0); + if (!ctrl) { + dev_err(jrdev, "of_iomap() failed\n"); + return -ENOMEM; } - jrpriv->jr_pdev = jr_pdev; - jrdev = &jr_pdev->dev; - dev_set_drvdata(jrdev, jrpriv); - ctrlpriv->jrdev[ring] = jrdev; + jrpriv->rregs = (struct caam_job_ring __force *)ctrl; if (sizeof(dma_addr_t) == sizeof(u64)) - if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) + if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) dma_set_mask(jrdev, DMA_BIT_MASK(40)); else dma_set_mask(jrdev, DMA_BIT_MASK(36)); @@ -405,15 +483,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, dma_set_mask(jrdev, DMA_BIT_MASK(32)); /* Identify the interrupt */ - jrpriv->irq = irq_of_parse_and_map(np, 0); + jrpriv->irq = irq_of_parse_and_map(nprop, 0); /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ if (error) { - of_device_unregister(jr_pdev); kfree(jrpriv); return error; } - return error; + jrpriv->dev = jrdev; + spin_lock(&driver_data.jr_alloc_lock); + list_add_tail(&jrpriv->list_node, &driver_data.jr_list); + spin_unlock(&driver_data.jr_alloc_lock); + + atomic_set(&jrpriv->tfm_count, 0); + + return 0; +} + +static struct of_device_id caam_jr_match[] = { + { + .compatible = "fsl,sec-v4.0-job-ring", + }, + { + .compatible = "fsl,sec4.0-job-ring", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_jr_match); + +static struct platform_driver caam_jr_driver = { + .driver = { + .name = "caam_jr", + .owner = THIS_MODULE, + .of_match_table = caam_jr_match, + }, + .probe = caam_jr_probe, + .remove = caam_jr_remove, +}; + +static int __init jr_driver_init(void) +{ + spin_lock_init(&driver_data.jr_alloc_lock); + INIT_LIST_HEAD(&driver_data.jr_list); + return platform_driver_register(&caam_jr_driver); +} + +static void __exit jr_driver_exit(void) +{ + platform_driver_unregister(&caam_jr_driver); } + +module_init(jr_driver_init); +module_exit(jr_driver_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM JR request backend"); +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h index 9d8741a59037..97113a6d6c58 100644 --- a/drivers/crypto/caam/jr.h +++ b/drivers/crypto/caam/jr.h @@ -8,12 +8,11 @@ #define JR_H /* Prototypes for backend-level services exposed to APIs */ +struct device *caam_jr_alloc(void); +void caam_jr_free(struct device *rdev); int caam_jr_enqueue(struct device *dev, u32 *desc, void (*cbk)(struct device *dev, u32 *desc, u32 status, void *areq), void *areq); -extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np, - int ring); -extern int caam_jr_shutdown(struct device *dev); #endif /* JR_H */ diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 4455396918de..d50174f45b21 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -245,7 +245,7 @@ struct rngtst { /* RNG4 TRNG test registers */ 
struct rng4tst { -#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ +#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ u32 rtmctl; /* misc. control register */ u32 rtscmisc; /* statistical check misc. register */ u32 rtpkrrng; /* poker range register */ @@ -255,6 +255,8 @@ struct rng4tst { }; #define RTSDCTL_ENT_DLY_SHIFT 16 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) +#define RTSDCTL_ENT_DLY_MIN 1200 +#define RTSDCTL_ENT_DLY_MAX 12800 u32 rtsdctl; /* seed control register */ union { u32 rtsblim; /* PRGM=1: sparse bit limit register */ @@ -266,7 +268,11 @@ struct rng4tst { u32 rtfrqcnt; /* PRGM=0: freq. count register */ }; u32 rsvd1[40]; +#define RDSTA_SKVT 0x80000000 +#define RDSTA_SKVN 0x40000000 #define RDSTA_IF0 0x00000001 +#define RDSTA_IF1 0x00000002 +#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0) u32 rdsta; u32 rsvd2[15]; }; @@ -692,6 +698,7 @@ struct caam_deco { u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ u32 jr_ctl_lo; u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ +#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ u32 op_status_lo; u32 rsvd24[2]; @@ -706,12 +713,13 @@ struct caam_deco { u32 rsvd29[48]; u32 descbuf[64]; /* DxDESB - Descriptor buffer */ u32 rscvd30[193]; +#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 +#define DESC_DBG_DECO_STAT_VALID 0x80000000 +#define DESC_DBG_DECO_STAT_MASK 0x00F00000 u32 desc_dbg; /* DxDDR - DECO Debug Register */ u32 rsvd31[126]; }; -/* DECO DBG Register Valid Bit*/ -#define DECO_DBG_VALID 0x80000000 #define DECO_JQCR_WHL 0x20000000 #define DECO_JQCR_FOUR 0x10000000 diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index e0037c8ee243..b12ff85f4241 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, return nents; } +/* Map SG page in kernel virtual address space and copy */ +static inline void sg_map_copy(u8 *dest, struct scatterlist *sg, + int len, int offset) +{ + u8 *mapped_addr; + + /* + * Page here can be user-space pinned using get_user_pages + * Same must be kmapped before use and kunmapped subsequently + */ + mapped_addr = kmap_atomic(sg_page(sg)); + memcpy(dest, mapped_addr + offset, len); + kunmap_atomic(mapped_addr); +} + /* Copy from len bytes of sg to dest, starting from beginning */ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) { @@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) int cpy_index = 0, next_cpy_index = current_sg->length; while (next_cpy_index < len) { - memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), - current_sg->length); + sg_map_copy(dest + cpy_index, current_sg, current_sg->length, + current_sg->offset); current_sg = scatterwalk_sg_next(current_sg); cpy_index = next_cpy_index; next_cpy_index += current_sg->length; } if (cpy_index < len) - memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), - len - cpy_index); + sg_map_copy(dest + cpy_index, current_sg, len-cpy_index, + current_sg->offset); } /* Copy sg data, from to_skip to end, to dest */ @@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, int to_skip, unsigned int end) { struct scatterlist *current_sg = sg; - int sg_index, cpy_index; + int sg_index, cpy_index, offset; sg_index = current_sg->length; while (sg_index <= to_skip) { @@ -148,9 +163,10 @@ 
static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, sg_index += current_sg->length; } cpy_index = sg_index - to_skip; - memcpy(dest, (u8 *) sg_virt(current_sg) + - current_sg->length - cpy_index, cpy_index); - current_sg = scatterwalk_sg_next(current_sg); - if (end - sg_index) + offset = current_sg->offset + current_sg->length - cpy_index; + sg_map_copy(dest, current_sg, cpy_index, offset); + if (end - sg_index) { + current_sg = scatterwalk_sg_next(current_sg); sg_copy(dest + cpy_index, current_sg, end - sg_index); + } } diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c index a8a7dd4b0d25..247ab8048f5b 100644 --- a/drivers/crypto/dcp.c +++ b/drivers/crypto/dcp.c @@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); - return -ENXIO; - } - dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start, - resource_size(r)); + dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(dev->dcp_regs_base)) + return PTR_ERR(dev->dcp_regs_base); dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); udelay(10); @@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev) return -EIO; } dev->dcp_vmi_irq = r->start; - ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); + ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0, + "dcp", dev); if (ret != 0) { dev_err(&pdev->dev, "can't request_irq (0)\n"); return -EIO; @@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev) r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!r) { dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); - ret = -EIO; - goto err_free_irq0; + return -EIO; } dev->dcp_irq = r->start; - ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); + ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp", + dev); if (ret != 0) { dev_err(&pdev->dev, "can't request_irq (1)\n"); - ret = -EIO; - goto err_free_irq0; + return -EIO; } dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, @@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev) GFP_KERNEL); if (!dev->hw_pkg[0]) { dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); - ret = -ENOMEM; - goto err_free_irq1; + return -ENOMEM; } for (i = 1; i < DCP_MAX_PKG; i++) { @@ -848,16 +844,14 @@ err_unregister: for (j = 0; j < i; j++) crypto_unregister_alg(&algs[j]); err_free_key_iv: + tasklet_kill(&dev->done_task); + tasklet_kill(&dev->queue_task); dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, dev->payload_base_dma); err_free_hw_packet: dma_free_coherent(&pdev->dev, DCP_MAX_PKG * sizeof(struct dcp_hw_packet), dev->hw_pkg[0], dev->hw_phys_pkg); -err_free_irq1: - free_irq(dev->dcp_irq, dev); -err_free_irq0: - free_irq(dev->dcp_vmi_irq, dev); return ret; } @@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev) int j; dev = platform_get_drvdata(pdev); - dma_free_coherent(&pdev->dev, - DCP_MAX_PKG * sizeof(struct dcp_hw_packet), - dev->hw_pkg[0], dev->hw_phys_pkg); - - dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, - dev->payload_base_dma); + misc_deregister(&dev->dcp_bootstream_misc); - free_irq(dev->dcp_irq, dev); - free_irq(dev->dcp_vmi_irq, dev); + for (j = 0; j < ARRAY_SIZE(algs); j++) + crypto_unregister_alg(&algs[j]); tasklet_kill(&dev->done_task); tasklet_kill(&dev->queue_task); - for (j = 0; j < ARRAY_SIZE(algs); j++) - crypto_unregister_alg(&algs[j]); + 
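
/*
 * Illustrative sketch, not part of the patch: the devm_* pattern the dcp
 * probe path is converted to here.  Managed resources are released
 * automatically when probe fails or the device is unbound, which is what
 * lets the err_free_irq* labels and the free_irq() calls disappear.
 * my_crypto_probe()/my_crypto_irq() are hypothetical names.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t my_crypto_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_crypto_probe(struct platform_device *pdev)
{
        struct resource *r;
        void __iomem *base;
        int irq, ret;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, r);    /* checks r == NULL itself */
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, my_crypto_irq, 0,
                               dev_name(&pdev->dev), pdev);
        if (ret)
                return ret;                     /* no manual unwinding needed */

        return 0;
}
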
dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, + dev->payload_base_dma); - misc_deregister(&dev->dcp_bootstream_misc); + dma_free_coherent(&pdev->dev, + DCP_MAX_PKG * sizeof(struct dcp_hw_packet), + dev->hw_pkg[0], dev->hw_phys_pkg); return 0; } diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 214357e12dc0..9dd6e01eac33 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct ixp_ctx *ctx = crypto_aead_ctx(tfm); - struct rtattr *rta = (struct rtattr *)key; - struct crypto_authenc_key_param *param; + struct crypto_authenc_keys keys; - if (!RTA_OK(rta, keylen)) - goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; - param = RTA_DATA(rta); - ctx->enckey_len = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); + if (keys.authkeylen > sizeof(ctx->authkey)) + goto badkey; - if (keylen < ctx->enckey_len) + if (keys.enckeylen > sizeof(ctx->enckey)) goto badkey; - ctx->authkey_len = keylen - ctx->enckey_len; - memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len); - memcpy(ctx->authkey, key, ctx->authkey_len); + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); + ctx->authkey_len = keys.authkeylen; + ctx->enckey_len = keys.enckeylen; return aead_setup(tfm, crypto_aead_authsize(tfm)); badkey: - ctx->enckey_len = 0; crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 3374a3ebe4c7..8d1e6f8e9e9c 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); } -irqreturn_t crypto_int(int irq, void *priv) +static irqreturn_t crypto_int(int irq, void *priv) { u32 val; @@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv) return IRQ_HANDLED; } -struct crypto_alg mv_aes_alg_ecb = { +static struct crypto_alg mv_aes_alg_ecb = { .cra_name = "ecb(aes)", .cra_driver_name = "mv-ecb-aes", .cra_priority = 300, @@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = { }, }; -struct crypto_alg mv_aes_alg_cbc = { +static struct crypto_alg mv_aes_alg_cbc = { .cra_name = "cbc(aes)", .cra_driver_name = "mv-cbc-aes", .cra_priority = 300, @@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = { }, }; -struct ahash_alg mv_sha1_alg = { +static struct ahash_alg mv_sha1_alg = { .init = mv_hash_init, .update = mv_hash_update, .final = mv_hash_final, @@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = { } }; -struct ahash_alg mv_hmac_sha1_alg = { +static struct ahash_alg mv_hmac_sha1_alg = { .init = mv_hash_init, .update = mv_hash_update, .final = mv_hash_final, @@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev) goto err_unmap_sram; } - ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), + ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), cp); if (ret) goto err_thread; @@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = { .driver = { .owner = THIS_MODULE, .name = "mv_crypto", - .of_match_table = of_match_ptr(mv_cesa_of_match_table), + .of_match_table = 
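
/*
 * Illustrative sketch, not part of the patch: the crypto_authenc_extractkeys()
 * pattern that ixp4xx (above), picoxcell and talitos (below) are all
 * converted to in this series, replacing open-coded rtattr parsing of the
 * authenc() key blob.  struct my_aead_ctx and its buffer sizes are
 * hypothetical; the bounds a real driver checks come from its hardware
 * limits.
 */
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/string.h>

struct my_aead_ctx {
        u8 authkey[64];
        u8 enckey[32];
        unsigned int authkeylen;
        unsigned int enckeylen;
};

static int my_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct my_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto badkey;

        if (keys.authkeylen > sizeof(ctx->authkey) ||
            keys.enckeylen > sizeof(ctx->enckey))
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkeylen = keys.authkeylen;
        ctx->enckeylen = keys.enckeylen;
        return 0;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
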
mv_cesa_of_match_table, }, }; MODULE_ALIAS("platform:mv_crypto"); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index ce791c2f81f7..a9ccbf14096e 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) if (dd->flags & FLAGS_CBC) val |= AES_REG_CTRL_CBC; if (dd->flags & FLAGS_CTR) { - val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; + val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; } if (dd->flags & FLAGS_ENCRYPT) @@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) return err; } -int omap_aes_check_aligned(struct scatterlist *sg) +static int omap_aes_check_aligned(struct scatterlist *sg) { while (sg) { if (!IS_ALIGNED(sg->offset, 4)) @@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg) return 0; } -int omap_aes_copy_sgs(struct omap_aes_dev *dd) +static int omap_aes_copy_sgs(struct omap_aes_dev *dd) { void *buf_in, *buf_out; int pages; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e28104b4aab0..e45aaaf0db30 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver); MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Dmitry Kasatkin"); +MODULE_ALIAS("platform:omap-sham"); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 888f7f4a6d3f..a6175ba6d238 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; - unsigned int authkeylen, enckeylen; + struct crypto_authenc_keys keys; int err = -EINVAL; - if (!RTA_OK(rta, keylen)) + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + if (keys.enckeylen > AES_MAX_KEY_SIZE) goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < enckeylen) - goto badkey; - - authkeylen = keylen - enckeylen; - - if (enckeylen > AES_MAX_KEY_SIZE) + if (keys.authkeylen > sizeof(ctx->hash_ctx)) goto badkey; if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == SPA_CTRL_CIPH_ALG_AES) - err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); + err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); else - err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); + err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); if (err) goto badkey; - memcpy(ctx->hash_ctx, key, authkeylen); - ctx->hash_key_len = authkeylen; + memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); + ctx->hash_key_len = keys.authkeylen; return 0; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d7bb8bac36e9..785a9ded7bdf 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = { .driver = { .name = SAHARA_NAME, .owner = THIS_MODULE, - .of_match_table = of_match_ptr(sahara_dt_ids), + .of_match_table = sahara_dt_ids, }, .id_table = 
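
/*
 * Minimal sketch, assuming only what the sahara/mv_cesa/omap-sham hunks
 * show: the driver boilerplate they converge on.  The of_match_ptr()
 * wrapper is dropped because the match table is built unconditionally
 * (keeping the wrapper would only leave the table unused when CONFIG_OF=n),
 * and MODULE_ALIAS("platform:...") lets the module autoload when the device
 * is instantiated as a plain platform device rather than from the device
 * tree.  All names below are hypothetical.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id my_dt_ids[] = {
        { .compatible = "vendor,my-crypto" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_dt_ids);

static int my_drv_probe(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver my_drv = {
        .probe  = my_drv_probe,
        .driver = {
                .name           = "my-crypto",
                .owner          = THIS_MODULE,
                .of_match_table = my_dt_ids,    /* no of_match_ptr() wrapper */
        },
};
module_platform_driver(my_drv);

MODULE_ALIAS("platform:my-crypto");
MODULE_LICENSE("GPL v2");
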
sahara_platform_ids, }; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6cd0e6038583..b44f4ddc565c 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { struct talitos_ctx *ctx = crypto_aead_ctx(authenc); - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; - unsigned int authkeylen; - unsigned int enckeylen; - - if (!RTA_OK(rta, keylen)) - goto badkey; + struct crypto_authenc_keys keys; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) + if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) goto badkey; - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); + memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); - if (keylen < enckeylen) - goto badkey; - - authkeylen = keylen - enckeylen; - - if (keylen > TALITOS_MAX_KEY_SIZE) - goto badkey; - - memcpy(&ctx->key, key, keylen); - - ctx->keylen = keylen; - ctx->enckeylen = enckeylen; - ctx->authkeylen = authkeylen; + ctx->keylen = keys.authkeylen + keys.enckeylen; + ctx->enckeylen = keys.enckeylen; + ctx->authkeylen = keys.authkeylen; return 0; @@ -809,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev, if (edesc->assoc_chained) talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE); - else + else if (areq->assoclen) /* assoc_nents counts also for IV in non-contiguous cases */ dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, @@ -992,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { - to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc)); + if (areq->assoclen) + to_talitos_ptr(&desc->ptr[1], + sg_dma_address(areq->assoc)); + else + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); desc->ptr[1].j_extent = 0; } @@ -1127,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, unsigned int authsize, unsigned int ivsize, int icv_stashing, - u32 cryptoflags) + u32 cryptoflags, + bool encrypt) { struct talitos_edesc *edesc; int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len; @@ -1141,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, return ERR_PTR(-EINVAL); } - if (iv) + if (ivsize) iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); - if (assoc) { + if (assoclen) { /* * Currently it is assumed that iv is provided whenever assoc * is. @@ -1160,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, assoc_nents = assoc_nents ? assoc_nents + 1 : 2; } - src_nents = sg_count(src, cryptlen + authsize, &src_chained); - src_nents = (src_nents == 1) ? 0 : src_nents; - - if (!dst) { - dst_nents = 0; - } else { - if (dst == src) { - dst_nents = src_nents; - } else { - dst_nents = sg_count(dst, cryptlen + authsize, - &dst_chained); - dst_nents = (dst_nents == 1) ? 0 : dst_nents; - } + if (!dst || dst == src) { + src_nents = sg_count(src, cryptlen + authsize, &src_chained); + src_nents = (src_nents == 1) ? 0 : src_nents; + dst_nents = dst ? src_nents : 0; + } else { /* dst && dst != src*/ + src_nents = sg_count(src, cryptlen + (encrypt ? 
0 : authsize), + &src_chained); + src_nents = (src_nents == 1) ? 0 : src_nents; + dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0), + &dst_chained); + dst_nents = (dst_nents == 1) ? 0 : dst_nents; } /* @@ -1192,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, edesc = kmalloc(alloc_len, GFP_DMA | flags); if (!edesc) { - talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); + if (assoc_chained) + talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE); + else if (assoclen) + dma_unmap_sg(dev, assoc, + assoc_nents ? assoc_nents - 1 : 1, + DMA_TO_DEVICE); + if (iv_dma) dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + dev_err(dev, "could not allocate edescriptor\n"); return ERR_PTR(-ENOMEM); } @@ -1216,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, - int icv_stashing) + int icv_stashing, bool encrypt) { struct crypto_aead *authenc = crypto_aead_reqtfm(areq); struct talitos_ctx *ctx = crypto_aead_ctx(authenc); @@ -1225,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst, iv, areq->assoclen, areq->cryptlen, ctx->authsize, ivsize, icv_stashing, - areq->base.flags); + areq->base.flags, encrypt); } static int aead_encrypt(struct aead_request *req) @@ -1235,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req) struct talitos_edesc *edesc; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(req, req->iv, 0); + edesc = aead_edesc_alloc(req, req->iv, 0, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1258,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req) req->cryptlen -= authsize; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(req, req->iv, 1); + edesc = aead_edesc_alloc(req, req->iv, 1, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1304,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req) struct talitos_edesc *edesc; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(areq, req->giv, 0); + edesc = aead_edesc_alloc(areq, req->giv, 0, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1460,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, } static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * - areq) + areq, bool encrypt) { struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); @@ -1468,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request * return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst, areq->info, 0, areq->nbytes, 0, ivsize, 0, - areq->base.flags); + areq->base.flags, encrypt); } static int ablkcipher_encrypt(struct ablkcipher_request *areq) @@ -1478,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) struct talitos_edesc *edesc; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(areq); + edesc = ablkcipher_edesc_alloc(areq, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1495,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) struct talitos_edesc *edesc; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(areq); + edesc = ablkcipher_edesc_alloc(areq, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1647,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct 
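
/*
 * Illustrative sketch, not part of the patch: the length bookkeeping behind
 * the 'encrypt' flag threaded through talitos_edesc_alloc() above.  For
 * AEAD, the ICV ('authsize' bytes) is written to the destination on
 * encryption and read from the source on decryption, so when src and dst
 * are distinct scatterlists they are counted against different totals.
 * The helper names are hypothetical.
 */
#include <linux/types.h>

static inline unsigned int aead_src_len(unsigned int cryptlen,
                                        unsigned int authsize, bool encrypt)
{
        return cryptlen + (encrypt ? 0 : authsize); /* ICV already in src on decrypt */
}

static inline unsigned int aead_dst_len(unsigned int cryptlen,
                                        unsigned int authsize, bool encrypt)
{
        return cryptlen + (encrypt ? authsize : 0); /* ICV appended to dst on encrypt */
}
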
ahash_request *areq, struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, - nbytes, 0, 0, 0, areq->base.flags); + nbytes, 0, 0, 0, areq->base.flags, false); } static int ahash_init(struct ahash_request *areq) diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index fa05e3c329bd..060eecc5dbc3 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c @@ -27,6 +27,8 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> @@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work); static DECLARE_WORK(aes_work, aes_workqueue_handler); static struct workqueue_struct *aes_wq; -extern unsigned long long tegra_chip_uid(void); - static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) { return readl(dd->io_base + offset); @@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, struct tegra_aes_dev *dd = aes_dev; struct tegra_aes_ctx *ctx = &rng_ctx; struct tegra_aes_slot *key_slot; - struct timespec ts; int ret = 0; - u64 nsec, tmp[2]; + u8 tmp[16]; /* 16 bytes = 128 bits of entropy */ u8 *dt; if (!ctx || !dd) { - dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n", + pr_err("ctx=0x%x, dd=0x%x\n", (unsigned int)ctx, (unsigned int)dd); return -EINVAL; } @@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; } else { - getnstimeofday(&ts); - nsec = timespec_to_ns(&ts); - do_div(nsec, 1000); - nsec ^= dd->ctr << 56; - dd->ctr++; - tmp[0] = nsec; - tmp[1] = tegra_chip_uid(); - dt = (u8 *)tmp; + get_random_bytes(tmp, sizeof(tmp)); + dt = tmp; } memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); @@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm) return 0; } -void tegra_aes_cra_exit(struct crypto_tfm *tfm) +static void tegra_aes_cra_exit(struct crypto_tfm *tfm) { struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); @@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev) } /* Initialize the vde clock */ - dd->aes_clk = clk_get(dev, "vde"); + dd->aes_clk = devm_clk_get(dev, "vde"); if (IS_ERR(dd->aes_clk)) { dev_err(dev, "iclock intialization failed.\n"); err = -ENODEV; @@ -1033,8 +1026,6 @@ out: if (dd->buf_out) dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->buf_out, dd->dma_buf_out); - if (!IS_ERR(dd->aes_clk)) - clk_put(dd->aes_clk); if (aes_wq) destroy_workqueue(aes_wq); spin_lock(&list_lock); @@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev) dd->buf_in, dd->dma_buf_in); dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, dd->buf_out, dd->dma_buf_out); - clk_put(dd->aes_clk); aes_dev = NULL; return 0; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index dd2874ec1927..446687cc2334 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -89,14 +89,15 @@ config AT_HDMAC Support the Atmel AHB DMA controller. config FSL_DMA - tristate "Freescale Elo and Elo Plus DMA support" + tristate "Freescale Elo series DMA support" depends on FSL_SOC select DMA_ENGINE select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- - Enable support for the Freescale Elo and Elo Plus DMA controllers. 
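
/*
 * Minimal sketch, assuming only the tegra-aes hunks above: the devm_clk_get()
 * conversion applied to the "vde" clock.  The managed reference is dropped
 * automatically on probe failure and on unbind, which is why the explicit
 * clk_put() calls vanish from the error path and from remove().
 * my_get_clock() is a hypothetical name.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_get_clock(struct platform_device *pdev)
{
        struct clk *clk = devm_clk_get(&pdev->dev, "vde");

        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* no clk_put() needed on any path */
        return 0;
}
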
- The Elo is the DMA controller on some 82xx and 83xx parts, and the - Elo Plus is the DMA controller on 85xx and 86xx parts. + Enable support for the Freescale Elo series DMA controllers. + The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the + EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on + some Txxx and Bxxx parts. config MPC512X_DMA tristate "Freescale MPC512x built-in DMA engine support" diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e51a9832ef0d..ec4ee5c1fe9d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->vd.tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - static void pl08x_desc_free(struct virt_dma_desc *vd) { struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); - if (!plchan->slave) - pl08x_unmap_buffers(txd); - + dma_descriptor_unmap(&vd->tx); if (!txd->done) pl08x_release_mux(plchan); @@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* @@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&plchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { vd = vchan_find_desc(&plchan->vc, cookie); if (vd) { /* On the issued list, so hasn't been processed yet */ @@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); - ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, - DRIVER_NAME, pl08x); + ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", __func__, adev->irq[0]); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c787f38a186a..e2c04dc81e2a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* move myself to free_list */ list_move(&desc->desc_node, &atchan->free_list); - /* unmap dma addresses (not on slave channels) */ - if (!atchan->chan_common.private) { - struct device *parent = chan2parent(&atchan->chan_common); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, - 
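
/*
 * Illustrative sketch, not part of the patch: what a descriptor completion
 * path looks like once per-driver unmap code such as pl08x_unmap_buffers()
 * and the at_hdmac block above is deleted.  The core helper
 * dma_descriptor_unmap() releases whatever unmap data the client attached
 * with dma_set_unmap(), so the slave/non-slave and page/single special
 * cases disappear from the drivers.  my_desc_complete() is hypothetical.
 */
#include <linux/dmaengine.h>

static void my_desc_complete(struct dma_async_tx_descriptor *txd)
{
        dma_descriptor_unmap(txd);      /* replaces open-coded dma_unmap_page()/dma_unmap_single() */

        if (txd->callback)
                txd->callback(txd->callback_param);
}
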
desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); /* for cyclic transfers, * no need to replay callback function while stopping */ if (!atc_chan_is_cyclic(atchan)) { @@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan, int bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* * There's no point calculating the residue if there's diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 31011d2a26fc..3c6716e0b78e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); @@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev) if (irq < 0) return irq; - err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, + err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, "coh901318", base); if (err) return err; diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b16..c29dacff66fa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -141,6 +141,9 @@ struct cppi41_dd { const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; + + /* context for suspend/resume */ + unsigned int dma_tdfdq; }; #define FIST_COMPLETION_QUEUE 93 @@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); } +static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) +{ + u32 desc; + + desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); + desc &= ~0x1f; + return desc; +} + static irqreturn_t cppi41_irq(int irq, void *data) { struct cppi41_dd *cdd = data; @@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) q_num = __fls(val); val &= ~(1 << q_num); q_num += 32 * i; - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); - desc &= ~0x1f; + desc = cppi41_pop_desc(cdd, q_num); c = desc_to_chan(cdd, desc); if (WARN_ON(!c)) { pr_err("%s() q %d desc %08x\n", __func__, @@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, /* lock */ ret = dma_cookie_status(chan, cookie, txstate); - if (txstate && ret == DMA_SUCCESS) + if (txstate && ret == DMA_COMPLETE) txstate->residue = c->residue; /* unlock */ @@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; } -static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) -{ - u32 desc; - - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); - desc &= ~0x1f; - return desc; -} - static int cppi41_tear_down_chan(struct cppi41_channel *c) { struct cppi41_dd *cdd = c->cdd; @@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) c->td_retry = 100; } - if (!c->td_seen) { - unsigned td_comp_queue; + if (!c->td_seen || !c->td_desc_seen) { - if (c->is_tx) - td_comp_queue = cdd->td_queue.complete; - else - td_comp_queue = c->q_comp_num; + desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); + if 
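
/*
 * Minimal sketch, assuming only the hunks above: the tx_status idiom shared
 * by these drivers after the DMA_SUCCESS -> DMA_COMPLETE rename.
 * dma_cookie_status() answers from the cookie bookkeeping alone; only when
 * the transfer has not completed does the driver go on to compute a
 * residue.  my_tx_status() is hypothetical and the residue value is a
 * placeholder.
 */
#include <linux/dmaengine.h>
#include "dmaengine.h"          /* dma_cookie_status()/dma_set_residue(), drivers/dma private header */

static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

        if (ret == DMA_COMPLETE)
                return ret;             /* nothing left to report */

        dma_set_residue(txstate, 0);    /* driver-specific residue calculation goes here */
        return ret;
}
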
(!desc_phys) + desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - desc_phys = cppi41_pop_desc(cdd, td_comp_queue); - if (desc_phys) { - __iormb(); + if (desc_phys == c->desc_phys) { + c->td_desc_seen = 1; + + } else if (desc_phys == td_desc_phys) { + u32 pd0; - if (desc_phys == td_desc_phys) { - u32 pd0; - pd0 = td->pd0; - WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); - WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); - WARN_ON((pd0 & 0x1f) != c->port_num); - } else { - WARN_ON_ONCE(1); - } - c->td_seen = 1; - } - } - if (!c->td_desc_seen) { - desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - if (desc_phys) { __iormb(); - WARN_ON(c->desc_phys != desc_phys); - c->td_desc_seen = 1; + pd0 = td->pd0; + WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); + WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); + WARN_ON((pd0 & 0x1f) != c->port_num); + c->td_seen = 1; + } else if (desc_phys) { + WARN_ON_ONCE(1); } } c->td_retry--; @@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) WARN_ON(!c->td_retry); if (!c->td_desc_seen) { - desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); + desc_phys = cppi41_pop_desc(cdd, c->q_num); WARN_ON(!desc_phys); } @@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) } } -static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) +static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) { struct cppi41_channel *cchan; int i; int ret; u32 n_chans; - ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", + ret = of_property_read_u32(dev->of_node, "#dma-channels", &n_chans); if (ret) return ret; @@ -719,7 +711,7 @@ err: return -ENOMEM; } -static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static void purge_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int mem_decs; int i; @@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); - dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, + dma_free_coherent(dev, mem_decs, cdd->cd, cdd->descs_phys); } } @@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); } -static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) +static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) { disable_sched(cdd); - purge_descs(pdev, cdd); + purge_descs(dev, cdd); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); - dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, + dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, cdd->scratch_phys); } -static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int desc_size; unsigned int mem_decs; @@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) reg |= ilog2(ALLOC_DECS_NUM) - 5; BUILD_BUG_ON(DESCS_AREAS != 1); - cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, + cdd->cd = dma_alloc_coherent(dev, mem_decs, &cdd->descs_phys, GFP_KERNEL); if (!cdd->cd) return -ENOMEM; @@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); } -static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) { int ret; 
BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); - cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, + cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, &cdd->scratch_phys, GFP_KERNEL); if (!cdd->qmgr_scratch) return -ENOMEM; @@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); - ret = init_descs(pdev, cdd); + ret = init_descs(dev, cdd); if (ret) goto err_td; @@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) init_sched(cdd); return 0; err_td: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); return ret; } @@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { }; MODULE_DEVICE_TABLE(of, cppi41_dma_ids); -static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) +static const struct cppi_glue_infos *get_glue_info(struct device *dev) { const struct of_device_id *of_id; - of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); + of_id = of_match_node(cppi41_dma_ids, dev->of_node); if (!of_id) return NULL; return of_id->data; @@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) static int cppi41_dma_probe(struct platform_device *pdev) { struct cppi41_dd *cdd; + struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; int irq; int ret; - glue_info = get_glue_info(pdev); + glue_info = get_glue_info(dev); if (!glue_info) return -EINVAL; @@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; cdd->ddev.device_control = cppi41_dma_control; - cdd->ddev.dev = &pdev->dev; + cdd->ddev.dev = dev; INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; - cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); - cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); - cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); - cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); + cdd->usbss_mem = of_iomap(dev->of_node, 0); + cdd->ctrl_mem = of_iomap(dev->of_node, 1); + cdd->sched_mem = of_iomap(dev->of_node, 2); + cdd->qmgr_mem = of_iomap(dev->of_node, 3); if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || !cdd->qmgr_mem) { @@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) goto err_remap; } - pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) goto err_get_sync; cdd->queues_rx = glue_info->queues_rx; cdd->queues_tx = glue_info->queues_tx; cdd->td_queue = glue_info->td_queue; - ret = init_cppi41(pdev, cdd); + ret = init_cppi41(dev, cdd); if (ret) goto err_init_cppi; - ret = cppi41_add_chans(pdev, cdd); + ret = cppi41_add_chans(dev, cdd); if (ret) goto err_chans; - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) goto err_irq; cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); ret = request_irq(irq, glue_info->isr, IRQF_SHARED, - dev_name(&pdev->dev), cdd); + dev_name(dev), cdd); if (ret) goto err_irq; cdd->irq = irq; @@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (ret) goto err_dma_reg; - ret = of_dma_controller_register(pdev->dev.of_node, + ret = 
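
/*
 * Illustrative sketch, not part of the patch: why the cppi41 probe path now
 * tests pm_runtime_get_sync() with "< 0".  The call returns a negative
 * errno on failure but may legitimately return 1 when the device was
 * already active, so treating any non-zero value as an error (as the old
 * "if (ret)" did) would abort probe on a healthy device.  my_power_up()
 * is hypothetical.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int my_power_up(struct device *dev)
{
        int ret;

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);         /* 0 or 1 on success */
        if (ret < 0) {
                pm_runtime_disable(dev);
                return ret;
        }
        return 0;
}
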
of_dma_controller_register(dev->of_node, cppi41_dma_xlate, &cpp41_dma_info); if (ret) goto err_of; @@ -1009,11 +1002,11 @@ err_irq: cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); cleanup_chans(cdd); err_chans: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); err_init_cppi: - pm_runtime_put(&pdev->dev); + pm_runtime_put(dev); err_get_sync: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); free_irq(cdd->irq, cdd); cleanup_chans(cdd); - deinit_cpii41(pdev, cdd); + deinit_cppi41(&pdev->dev, cdd); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int cppi41_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + + cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); + disable_sched(cdd); + + return 0; +} + +static int cppi41_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + struct cppi41_channel *c; + int i; + + for (i = 0; i < DESCS_AREAS; i++) + cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); + + list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) + if (!c->is_tx) + cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + + init_sched(cdd); + + cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); + + cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); + static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .owner = THIS_MODULE, + .pm = &cppi41_pm_ops, .of_match_table = of_match_ptr(cppi41_dma_ids), }, }; diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index b0c0c8268d42..94c380f07538 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, unsigned long flags; status = dma_cookie_status(c, cookie, state); - if (status == DMA_SUCCESS || !state) + if (status == DMA_COMPLETE || !state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9162ac80c18f..ea806bdc12ef 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -65,6 +65,7 @@ #include <linux/acpi.h> #include <linux/acpi_dma.h> #include <linux/of_dma.h> +#include <linux/mempool.h> static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDR(dma_idr); @@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device) } EXPORT_SYMBOL(dma_async_device_unregister); -/** - * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses - * @chan: DMA channel to offload copy to - * @dest: destination address (virtual) - * @src: source address (virtual) - * @len: length - * - * Both @dest and @src must be mappable to a bus address according to the - * DMA mapping API rules for streaming mappings. 
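
/*
 * Minimal sketch, assuming only the cppi41 hunks above: the
 * SIMPLE_DEV_PM_OPS wiring.  The macro builds a dev_pm_ops around the two
 * system sleep callbacks and leaves it empty when CONFIG_PM_SLEEP=n, which
 * is why the callbacks themselves sit under #ifdef CONFIG_PM_SLEEP.  The
 * driver and callback names below are hypothetical.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int my_dma_suspend(struct device *dev)
{
        return 0;       /* save context, quiesce the hardware */
}

static int my_dma_resume(struct device *dev)
{
        return 0;       /* restore context */
}
#endif

static SIMPLE_DEV_PM_OPS(my_dma_pm_ops, my_dma_suspend, my_dma_resume);

static struct platform_driver my_dma_driver = {
        .driver = {
                .name  = "my-dma-engine",
                .owner = THIS_MODULE,
                .pm    = &my_dma_pm_ops,
        },
};
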
- * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages). - */ -dma_cookie_t -dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, - void *src, size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; - dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); - dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | - DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_COMPL_DEST_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); +#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } +static struct dmaengine_unmap_pool unmap_pool[] = { + __UNMAP_POOL(2), + #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + __UNMAP_POOL(16), + __UNMAP_POOL(128), + __UNMAP_POOL(256), + #endif +}; - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; +static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) +{ + int order = get_count_order(nr); + + switch (order) { + case 0 ... 1: + return &unmap_pool[0]; + case 2 ... 4: + return &unmap_pool[1]; + case 5 ... 7: + return &unmap_pool[2]; + case 8: + return &unmap_pool[3]; + default: + BUG(); + return NULL; } +} - tx->callback = NULL; - cookie = tx->tx_submit(tx); +static void dmaengine_unmap(struct kref *kref) +{ + struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); + struct device *dev = unmap->dev; + int cnt, i; + + cnt = unmap->to_cnt; + for (i = 0; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_TO_DEVICE); + cnt += unmap->from_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_FROM_DEVICE); + cnt += unmap->bidi_cnt; + for (; i < cnt; i++) { + if (unmap->addr[i] == 0) + continue; + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_BIDIRECTIONAL); + } + mempool_free(unmap, __get_unmap_pool(cnt)->pool); +} - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ + if (unmap) + kref_put(&unmap->kref, dmaengine_unmap); +} +EXPORT_SYMBOL_GPL(dmaengine_unmap_put); - return cookie; +static void dmaengine_destroy_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + + if (p->pool) + mempool_destroy(p->pool); + p->pool = NULL; + if (p->cache) + kmem_cache_destroy(p->cache); + p->cache = NULL; + } } -EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. 
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -dma_cookie_t -dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, - unsigned int offset, void *kdata, size_t len) +static int __init dmaengine_init_unmap_pool(void) { - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; + int i; - dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + size_t size; - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; + size = sizeof(struct dmaengine_unmap_data) + + sizeof(dma_addr_t) * p->size; + + p->cache = kmem_cache_create(p->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!p->cache) + break; + p->pool = mempool_create_slab_pool(1, p->cache); + if (!p->pool) + break; } - tx->callback = NULL; - cookie = tx->tx_submit(tx); + if (i == ARRAY_SIZE(unmap_pool)) + return 0; - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); + dmaengine_destroy_unmap_pool(); + return -ENOMEM; +} - return cookie; +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + struct dmaengine_unmap_data *unmap; + + unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); + if (!unmap) + return NULL; + + memset(unmap, 0, sizeof(*unmap)); + kref_init(&unmap->kref); + unmap->dev = dev; + + return unmap; } -EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); +EXPORT_SYMBOL(dmaengine_get_unmap_data); /** * dma_async_memcpy_pg_to_pg - offloaded copy from page to page @@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; unsigned long flags; - dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, - DMA_FROM_DEVICE); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + if (!unmap) + return -ENOMEM; + + unmap->to_cnt = 1; + unmap->from_cnt = 1; + unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, + DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, + DMA_FROM_DEVICE); + unmap->len = len; flags = DMA_CTRL_ACK; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], + len, flags); if (!tx) { - dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); + dmaengine_unmap_put(unmap); return -ENOMEM; } - tx->callback = NULL; + dma_set_unmap(tx, unmap); cookie = tx->tx_submit(tx); + dmaengine_unmap_put(unmap); preempt_disable(); __this_cpu_add(chan->local->bytes_transferred, len); @@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, } EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); +/** + * dma_async_memcpy_buf_to_buf - offloaded copy 
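
/*
 * Illustrative sketch, not part of the patch: the client side of the new
 * unmap-data scheme, following the converted dma_async_memcpy_pg_to_pg()
 * above.  The unmap object is refcounted: dma_set_unmap() takes a reference
 * for the descriptor, the caller drops its own right after submit, and the
 * driver's completion path releases the last one via dma_descriptor_unmap(),
 * which performs the actual dma_unmap_page() calls.  my_issue_copy() and
 * its lack of mapping-error checks are simplifications.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

static dma_cookie_t my_issue_copy(struct dma_chan *chan, struct page *dst,
                                  struct page *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dmaengine_unmap_data *unmap;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie = -ENOMEM;

        unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
        if (!unmap)
                return -ENOMEM;

        unmap->to_cnt = 1;
        unmap->from_cnt = 1;
        unmap->len = len;
        unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
        unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);

        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                         len, DMA_CTRL_ACK);
        if (tx) {
                dma_set_unmap(tx, unmap);       /* descriptor now owns a reference */
                cookie = tx->tx_submit(tx);
        }
        dmaengine_unmap_put(unmap);             /* drop the caller's reference */
        return cookie;
}
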
between virtual addresses + * @chan: DMA channel to offload copy to + * @dest: destination address (virtual) + * @src: source address (virtual) + * @len: length + * + * Both @dest and @src must be mappable to a bus address according to the + * DMA mapping API rules for streaming mappings. + * Both @dest and @src must stay memory resident (kernel memory or locked + * user space pages). + */ +dma_cookie_t +dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, + void *src, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), + (unsigned long) dest & ~PAGE_MASK, + virt_to_page(src), + (unsigned long) src & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page + * @chan: DMA channel to offload copy to + * @page: destination page + * @offset: offset in page to copy to + * @kdata: source address (virtual) + * @len: length + * + * Both @page/@offset and @kdata must be mappable to a bus address according + * to the DMA mapping API rules for streaming mappings. + * Both @page/@offset and @kdata must stay memory resident (kernel memory or + * locked user space pages) + */ +dma_cookie_t +dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, + unsigned int offset, void *kdata, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, page, offset, + virt_to_page(kdata), + (unsigned long) kdata & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); + void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { @@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); if (!tx) - return DMA_SUCCESS; + return DMA_COMPLETE; while (tx->cookie == -EBUSY) { if (time_after_eq(jiffies, dma_sync_wait_timeout)) { @@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); static int __init dma_bus_init(void) { + int err = dmaengine_init_unmap_pool(); + + if (err) + return err; return class_register(&dma_devclass); } arch_initcall(dma_bus_init); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 92f796cdc6ab..20f9a3aaf926 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -8,6 +8,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> @@ -19,10 +21,6 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/wait.h> -#include <linux/ctype.h> -#include <linux/debugfs.h> -#include <linux/uaccess.h> -#include <linux/seq_file.h> static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); @@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); -/* Maximum amount of mismatched bytes in buffer to print */ -#define MAX_ERROR_COUNT 32 - -/* - * Initialization patterns. All bytes in the source buffer has bit 7 - * set, all bytes in the destination buffer has bit 7 cleared. - * - * Bit 6 is set for all bytes which are to be copied by the DMA - * engine. Bit 5 is set for all bytes which are to be overwritten by - * the DMA engine. - * - * The remaining bits are the inverse of a counter which increments by - * one for each byte address. 
- */ -#define PATTERN_SRC 0x80 -#define PATTERN_DST 0x00 -#define PATTERN_COPY 0x40 -#define PATTERN_OVERWRITE 0x20 -#define PATTERN_COUNT_MASK 0x1f - -enum dmatest_error_type { - DMATEST_ET_OK, - DMATEST_ET_MAP_SRC, - DMATEST_ET_MAP_DST, - DMATEST_ET_PREP, - DMATEST_ET_SUBMIT, - DMATEST_ET_TIMEOUT, - DMATEST_ET_DMA_ERROR, - DMATEST_ET_DMA_IN_PROGRESS, - DMATEST_ET_VERIFY, - DMATEST_ET_VERIFY_BUF, -}; - -struct dmatest_verify_buffer { - unsigned int index; - u8 expected; - u8 actual; -}; - -struct dmatest_verify_result { - unsigned int error_count; - struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; - u8 pattern; - bool is_srcbuf; -}; - -struct dmatest_thread_result { - struct list_head node; - unsigned int n; - unsigned int src_off; - unsigned int dst_off; - unsigned int len; - enum dmatest_error_type type; - union { - unsigned long data; - dma_cookie_t cookie; - enum dma_status status; - int error; - struct dmatest_verify_result *vr; - }; -}; - -struct dmatest_result { - struct list_head node; - char *name; - struct list_head results; -}; - -struct dmatest_info; - -struct dmatest_thread { - struct list_head node; - struct dmatest_info *info; - struct task_struct *task; - struct dma_chan *chan; - u8 **srcs; - u8 **dsts; - enum dma_transaction_type type; - bool done; -}; +static bool noverify; +module_param(noverify, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); -struct dmatest_chan { - struct list_head node; - struct dma_chan *chan; - struct list_head threads; -}; +static bool verbose; +module_param(verbose, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); /** * struct dmatest_params - test parameters. @@ -177,6 +96,7 @@ struct dmatest_params { unsigned int xor_sources; unsigned int pq_sources; int timeout; + bool noverify; }; /** @@ -184,7 +104,7 @@ struct dmatest_params { * @params: test parameters * @lock: access protection to the fields of this structure */ -struct dmatest_info { +static struct dmatest_info { /* Test parameters */ struct dmatest_params params; @@ -192,16 +112,95 @@ struct dmatest_info { struct list_head channels; unsigned int nr_channels; struct mutex lock; + bool did_init; +} test_info = { + .channels = LIST_HEAD_INIT(test_info.channels), + .lock = __MUTEX_INITIALIZER(test_info.lock), +}; + +static int dmatest_run_set(const char *val, const struct kernel_param *kp); +static int dmatest_run_get(char *val, const struct kernel_param *kp); +static struct kernel_param_ops run_ops = { + .set = dmatest_run_set, + .get = dmatest_run_get, +}; +static bool dmatest_run; +module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(run, "Run the test (default: false)"); + +/* Maximum amount of mismatched bytes in buffer to print */ +#define MAX_ERROR_COUNT 32 + +/* + * Initialization patterns. All bytes in the source buffer has bit 7 + * set, all bytes in the destination buffer has bit 7 cleared. + * + * Bit 6 is set for all bytes which are to be copied by the DMA + * engine. Bit 5 is set for all bytes which are to be overwritten by + * the DMA engine. + * + * The remaining bits are the inverse of a counter which increments by + * one for each byte address. 
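
/*
 * Minimal sketch, assuming only the dmatest hunks around this point: the
 * module_param_cb() pattern now used for the "run" (and, just below, the
 * "wait") parameter, so that reading or writing the parameter can trigger
 * driver logic instead of merely storing a bool.  my_run/my_run_ops and the
 * pr_info() hook are hypothetical.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool my_run;

static int my_run_set(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_bool(val, kp);      /* parse into my_run first */

        if (ret)
                return ret;
        if (my_run)
                pr_info("starting test\n");     /* hook point for real work */
        return 0;
}

static int my_run_get(char *val, const struct kernel_param *kp)
{
        return param_get_bool(val, kp);
}

static const struct kernel_param_ops my_run_ops = {
        .set = my_run_set,
        .get = my_run_get,
};
module_param_cb(run, &my_run_ops, &my_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");
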
+ */ +#define PATTERN_SRC 0x80 +#define PATTERN_DST 0x00 +#define PATTERN_COPY 0x40 +#define PATTERN_OVERWRITE 0x20 +#define PATTERN_COUNT_MASK 0x1f - /* debugfs related stuff */ - struct dentry *root; +struct dmatest_thread { + struct list_head node; + struct dmatest_info *info; + struct task_struct *task; + struct dma_chan *chan; + u8 **srcs; + u8 **dsts; + enum dma_transaction_type type; + bool done; +}; - /* Test results */ - struct list_head results; - struct mutex results_lock; +struct dmatest_chan { + struct list_head node; + struct dma_chan *chan; + struct list_head threads; }; -static struct dmatest_info test_info; +static DECLARE_WAIT_QUEUE_HEAD(thread_wait); +static bool wait; + +static bool is_threaded_test_run(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) + return true; + } + } + + return false; +} + +static int dmatest_wait_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; + + if (params->iterations) + wait_event(thread_wait, !is_threaded_test_run(info)); + wait = true; + return param_get_bool(val, kp); +} + +static struct kernel_param_ops wait_ops = { + .get = dmatest_wait_get, + .set = param_set_bool, +}; +module_param_cb(wait, &wait_ops, &wait, S_IRUGO); +MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); static bool dmatest_match_channel(struct dmatest_params *params, struct dma_chan *chan) @@ -223,7 +222,7 @@ static unsigned long dmatest_random(void) { unsigned long buf; - get_random_bytes(&buf, sizeof(buf)); + prandom_bytes(&buf, sizeof(buf)); return buf; } @@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, } } -static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, - unsigned int start, unsigned int end, unsigned int counter, - u8 pattern, bool is_srcbuf) +static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, + unsigned int counter, bool is_srcbuf) +{ + u8 diff = actual ^ pattern; + u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); + const char *thread_name = current->comm; + + if (is_srcbuf) + pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if ((pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if (diff & PATTERN_SRC) + pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else + pr_warn("%s: dstbuf[0x%x] mismatch! 
Expected %02x, got %02x\n", + thread_name, index, expected, actual); +} + +static unsigned int dmatest_verify(u8 **bufs, unsigned int start, + unsigned int end, unsigned int counter, u8 pattern, + bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; @@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, u8 expected; u8 *buf; unsigned int counter_orig = counter; - struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; @@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { - if (error_count < MAX_ERROR_COUNT && vr) { - vb = &vr->data[error_count]; - vb->index = i; - vb->expected = expected; - vb->actual = actual; - } + if (error_count < MAX_ERROR_COUNT) + dmatest_mismatch(actual, pattern, i, + counter, is_srcbuf); error_count++; } counter++; @@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, } if (error_count > MAX_ERROR_COUNT) - pr_warning("%s: %u errors suppressed\n", + pr_warn("%s: %u errors suppressed\n", current->comm, error_count - MAX_ERROR_COUNT); return error_count; @@ -313,20 +330,6 @@ static void dmatest_callback(void *arg) wake_up_all(done->wait); } -static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); -} - -static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); -} - static unsigned int min_odd(unsigned int x, unsigned int y) { unsigned int val = min(x, y); @@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? 
val : val - 1; } -static char *verify_result_get_one(struct dmatest_verify_result *vr, - unsigned int i) +static void result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, unsigned long data) { - struct dmatest_verify_buffer *vb = &vr->data[i]; - u8 diff = vb->actual ^ vr->pattern; - static char buf[512]; - char *msg; - - if (vr->is_srcbuf) - msg = "srcbuf overwritten!"; - else if ((vr->pattern & PATTERN_COPY) - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) - msg = "dstbuf not copied!"; - else if (diff & PATTERN_SRC) - msg = "dstbuf was copied!"; - else - msg = "dstbuf mismatch!"; - - snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, - vb->index, vb->expected, vb->actual); - - return buf; + pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static char *thread_result_get(const char *name, - struct dmatest_thread_result *tr) +static void dbg_result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, + unsigned long data) { - static const char * const messages[] = { - [DMATEST_ET_OK] = "No errors", - [DMATEST_ET_MAP_SRC] = "src mapping error", - [DMATEST_ET_MAP_DST] = "dst mapping error", - [DMATEST_ET_PREP] = "prep error", - [DMATEST_ET_SUBMIT] = "submit error", - [DMATEST_ET_TIMEOUT] = "test timed out", - [DMATEST_ET_DMA_ERROR] = - "got completion callback (DMA_ERROR)", - [DMATEST_ET_DMA_IN_PROGRESS] = - "got completion callback (DMA_IN_PROGRESS)", - [DMATEST_ET_VERIFY] = "errors", - [DMATEST_ET_VERIFY_BUF] = "verify errors", - }; - static char buf[512]; - - snprintf(buf, sizeof(buf) - 1, - "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", - name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, - tr->len, tr->data); - - return buf; + pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static int thread_result_add(struct dmatest_info *info, - struct dmatest_result *r, enum dmatest_error_type type, - unsigned int n, unsigned int src_off, unsigned int dst_off, - unsigned int len, unsigned long data) -{ - struct dmatest_thread_result *tr; - - tr = kzalloc(sizeof(*tr), GFP_KERNEL); - if (!tr) - return -ENOMEM; - - tr->type = type; - tr->n = n; - tr->src_off = src_off; - tr->dst_off = dst_off; - tr->len = len; - tr->data = data; +#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ + if (verbose) \ + result(err, n, src_off, dst_off, len, data); \ + else \ + dbg_result(err, n, src_off, dst_off, len, data); \ +}) - mutex_lock(&info->results_lock); - list_add_tail(&tr->node, &r->results); - mutex_unlock(&info->results_lock); - - if (tr->type == DMATEST_ET_OK) - pr_debug("%s\n", thread_result_get(r->name, tr)); - else - pr_warn("%s\n", thread_result_get(r->name, tr)); - - return 0; -} - -static unsigned int verify_result_add(struct dmatest_info *info, - struct dmatest_result *r, unsigned int n, - unsigned int src_off, unsigned int dst_off, unsigned int len, - u8 **bufs, int whence, unsigned int counter, u8 pattern, - bool is_srcbuf) +static unsigned long long dmatest_persec(s64 runtime, unsigned int val) { - struct dmatest_verify_result *vr; - unsigned int error_count; - unsigned int buf_off = is_srcbuf ? 
src_off : dst_off; - unsigned int start, end; - - if (whence < 0) { - start = 0; - end = buf_off; - } else if (whence > 0) { - start = buf_off + len; - end = info->params.buf_size; - } else { - start = buf_off; - end = buf_off + len; - } + unsigned long long per_sec = 1000000; - vr = kmalloc(sizeof(*vr), GFP_KERNEL); - if (!vr) { - pr_warn("dmatest: No memory to store verify result\n"); - return dmatest_verify(NULL, bufs, start, end, counter, pattern, - is_srcbuf); - } - - vr->pattern = pattern; - vr->is_srcbuf = is_srcbuf; - - error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, - is_srcbuf); - if (error_count) { - vr->error_count = error_count; - thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, - dst_off, len, (unsigned long)vr); - return error_count; - } - - kfree(vr); - return 0; -} - -static void result_free(struct dmatest_info *info, const char *name) -{ - struct dmatest_result *r, *_r; - - mutex_lock(&info->results_lock); - list_for_each_entry_safe(r, _r, &info->results, node) { - struct dmatest_thread_result *tr, *_tr; - - if (name && strcmp(r->name, name)) - continue; - - list_for_each_entry_safe(tr, _tr, &r->results, node) { - if (tr->type == DMATEST_ET_VERIFY_BUF) - kfree(tr->vr); - list_del(&tr->node); - kfree(tr); - } + if (runtime <= 0) + return 0; - kfree(r->name); - list_del(&r->node); - kfree(r); + /* drop precision until runtime is 32-bits */ + while (runtime > UINT_MAX) { + runtime >>= 1; + per_sec <<= 1; } - mutex_unlock(&info->results_lock); + per_sec *= val; + do_div(per_sec, runtime); + return per_sec; } -static struct dmatest_result *result_init(struct dmatest_info *info, - const char *name) +static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) { - struct dmatest_result *r; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (r) { - r->name = kstrdup(name, GFP_KERNEL); - INIT_LIST_HEAD(&r->results); - mutex_lock(&info->results_lock); - list_add_tail(&r->node, &info->results); - mutex_unlock(&info->results_lock); - } - return r; + return dmatest_persec(runtime, len >> 10); } /* @@ -525,7 +405,6 @@ static int dmatest_func(void *data) struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; - const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; @@ -538,9 +417,10 @@ static int dmatest_func(void *data) int src_cnt; int dst_cnt; int i; - struct dmatest_result *result; + ktime_t ktime; + s64 runtime = 0; + unsigned long long total_len = 0; - thread_name = current->comm; set_freezable(); ret = -ENOMEM; @@ -570,10 +450,6 @@ static int dmatest_func(void *data) } else goto err_thread_type; - result = result_init(info, thread_name); - if (!result) - goto err_srcs; - thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; @@ -597,17 +473,17 @@ static int dmatest_func(void *data) set_user_nice(current, 10); /* - * src buffers are freed by the DMAEngine code with dma_unmap_single() - * dst buffers are freed by ourselves below + * src and dst buffers are freed by ourselves below */ - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT - | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + ktime = ktime_get(); while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t dma_srcs[src_cnt]; - dma_addr_t dma_dsts[dst_cnt]; + struct dmaengine_unmap_data *um; + dma_addr_t srcs[src_cnt]; + 
dma_addr_t *dsts; u8 align = 0; total_tests++; @@ -626,81 +502,103 @@ static int dmatest_func(void *data) break; } - len = dmatest_random() % params->buf_size + 1; + if (params->noverify) { + len = params->buf_size; + src_off = 0; + dst_off = 0; + } else { + len = dmatest_random() % params->buf_size + 1; + len = (len >> align) << align; + if (!len) + len = 1 << align; + src_off = dmatest_random() % (params->buf_size - len + 1); + dst_off = dmatest_random() % (params->buf_size - len + 1); + + src_off = (src_off >> align) << align; + dst_off = (dst_off >> align) << align; + + dmatest_init_srcs(thread->srcs, src_off, len, + params->buf_size); + dmatest_init_dsts(thread->dsts, dst_off, len, + params->buf_size); + } + len = (len >> align) << align; if (!len) len = 1 << align; - src_off = dmatest_random() % (params->buf_size - len + 1); - dst_off = dmatest_random() % (params->buf_size - len + 1); + total_len += len; - src_off = (src_off >> align) << align; - dst_off = (dst_off >> align) << align; - - dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); - dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); + um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, + GFP_KERNEL); + if (!um) { + failed_tests++; + result("unmap data NULL", total_tests, + src_off, dst_off, len, ret); + continue; + } + um->len = params->buf_size; for (i = 0; i < src_cnt; i++) { - u8 *buf = thread->srcs[i] + src_off; - - dma_srcs[i] = dma_map_single(dev->dev, buf, len, - DMA_TO_DEVICE); - ret = dma_mapping_error(dev->dev, dma_srcs[i]); + unsigned long buf = (unsigned long) thread->srcs[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + um->addr[i] = dma_map_page(dev->dev, pg, pg_off, + um->len, DMA_TO_DEVICE); + srcs[i] = um->addr[i] + src_off; + ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, i); - thread_result_add(info, result, - DMATEST_ET_MAP_SRC, - total_tests, src_off, dst_off, - len, ret); + dmaengine_unmap_put(um); + result("src mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } + um->to_cnt++; } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ + dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - params->buf_size, - DMA_BIDIRECTIONAL); - ret = dma_mapping_error(dev->dev, dma_dsts[i]); + unsigned long buf = (unsigned long) thread->dsts[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - i); - thread_result_add(info, result, - DMATEST_ET_MAP_DST, - total_tests, src_off, dst_off, - len, ret); + dmaengine_unmap_put(um); + result("dst mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } + um->bidi_cnt++; } if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, - dma_dsts[0] + dst_off, - dma_srcs[0], len, - flags); + dsts[0] + dst_off, + srcs[0], len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, - dma_dsts[0] + dst_off, - dma_srcs, src_cnt, + dsts[0] + dst_off, + srcs, src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; for (i = 0; i < dst_cnt; i++) - dma_pq[i] = dma_dsts[i] + dst_off; - tx = 
dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, + dma_pq[i] = dsts[i] + dst_off; + tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, src_cnt, pq_coefs, len, flags); } if (!tx) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - dst_cnt); - thread_result_add(info, result, DMATEST_ET_PREP, - total_tests, src_off, dst_off, - len, 0); + dmaengine_unmap_put(um); + result("prep error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -712,9 +610,9 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - thread_result_add(info, result, DMATEST_ET_SUBMIT, - total_tests, src_off, dst_off, - len, cookie); + dmaengine_unmap_put(um); + result("submit error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -735,59 +633,59 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ - thread_result_add(info, result, DMATEST_ET_TIMEOUT, - total_tests, src_off, dst_off, - len, 0); + dmaengine_unmap_put(um); + result("test timed out", total_tests, src_off, dst_off, + len, 0); failed_tests++; continue; - } else if (status != DMA_SUCCESS) { - enum dmatest_error_type type = (status == DMA_ERROR) ? - DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; - thread_result_add(info, result, type, - total_tests, src_off, dst_off, - len, status); + } else if (status != DMA_COMPLETE) { + dmaengine_unmap_put(um); + result(status == DMA_ERROR ? + "completion error status" : + "completion busy status", total_tests, src_off, + dst_off, len, ret); failed_tests++; continue; } - /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); + dmaengine_unmap_put(um); - error_count = 0; + if (params->noverify) { + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); + continue; + } - pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, -1, + pr_debug("%s: verifying source buffer...\n", current->comm); + error_count = dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 0, - src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 1, - src_off + len, PATTERN_SRC, true); - - pr_debug("%s: verifying dest buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, -1, + error_count += dmatest_verify(thread->srcs, src_off, + src_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, true); + error_count += dmatest_verify(thread->srcs, src_off + len, + params->buf_size, src_off + len, + PATTERN_SRC, true); + + pr_debug("%s: verifying dest buffer...\n", current->comm); + error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 0, - src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 1, - dst_off + len, PATTERN_DST, false); + error_count += dmatest_verify(thread->dsts, dst_off, + dst_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, false); + error_count += 
dmatest_verify(thread->dsts, dst_off + len, + params->buf_size, dst_off + len, + PATTERN_DST, false); if (error_count) { - thread_result_add(info, result, DMATEST_ET_VERIFY, - total_tests, src_off, dst_off, - len, error_count); + result("data error", total_tests, src_off, dst_off, + len, error_count); failed_tests++; } else { - thread_result_add(info, result, DMATEST_ET_OK, - total_tests, src_off, dst_off, - len, 0); + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); } } + runtime = ktime_us_delta(ktime_get(), ktime); ret = 0; for (i = 0; thread->dsts[i]; i++) @@ -802,20 +700,17 @@ err_srcbuf: err_srcs: kfree(pq_coefs); err_thread_type: - pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", - thread_name, total_tests, failed_tests, ret); + pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", + current->comm, total_tests, failed_tests, + dmatest_persec(runtime, total_tests), + dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ if (ret) dmaengine_terminate_all(chan); thread->done = true; - - if (params->iterations > 0) - while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); - interruptible_sleep_on(&wait_dmatest_exit); - } + wake_up(&thread_wait); return ret; } @@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { ret = kthread_stop(thread->task); - pr_debug("dmatest: thread %s exited with status %d\n", - thread->task->comm, ret); + pr_debug("thread %s exited with status %d\n", + thread->task->comm, ret); list_del(&thread->node); + put_task_struct(thread->task); kfree(thread); } @@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info, for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { - pr_warning("dmatest: No memory for %s-%s%u\n", - dma_chan_name(chan), op, i); - + pr_warn("No memory for %s-%s%u\n", + dma_chan_name(chan), op, i); break; } thread->info = info; thread->chan = dtc->chan; thread->type = type; smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", + thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { - pr_warning("dmatest: Failed to run thread %s-%s%u\n", - dma_chan_name(chan), op, i); + pr_warn("Failed to create thread %s-%s%u\n", + dma_chan_name(chan), op, i); kfree(thread); break; } /* srcbuf and dstbuf are allocated by the thread itself */ - + get_task_struct(thread->task); list_add_tail(&thread->node, &dtc->threads); + wake_up_process(thread->task); } return i; @@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info, dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { - pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); + pr_warn("No memory for %s\n", dma_chan_name(chan)); return -ENOMEM; } @@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info, thread_count += cnt > 0 ? 
cnt : 0; } - pr_info("dmatest: Started %u threads using %s\n", + pr_info("Started %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); @@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static int __run_threaded_test(struct dmatest_info *info) +static void request_channels(struct dmatest_info *info, + enum dma_transaction_type type) { dma_cap_mask_t mask; - struct dma_chan *chan; - struct dmatest_params *params = &info->params; - int err = 0; dma_cap_zero(mask); - dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(type, mask); for (;;) { + struct dmatest_params *params = &info->params; + struct dma_chan *chan; + chan = dma_request_channel(mask, filter, params); if (chan) { - err = dmatest_add_channel(info, chan); - if (err) { + if (dmatest_add_channel(info, chan)) { dma_release_channel(chan); break; /* add_channel failed, punt */ } @@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info) info->nr_channels >= params->max_channels) break; /* we have all we need */ } - return err; } -#ifndef MODULE -static int run_threaded_test(struct dmatest_info *info) +static void run_threaded_test(struct dmatest_info *info) { - int ret; + struct dmatest_params *params = &info->params; - mutex_lock(&info->lock); - ret = __run_threaded_test(info); - mutex_unlock(&info->lock); - return ret; + /* Copy test parameters */ + params->buf_size = test_buf_size; + strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); + strlcpy(params->device, strim(test_device), sizeof(params->device)); + params->threads_per_chan = threads_per_chan; + params->max_channels = max_channels; + params->iterations = iterations; + params->xor_sources = xor_sources; + params->pq_sources = pq_sources; + params->timeout = timeout; + params->noverify = noverify; + + request_channels(info, DMA_MEMCPY); + request_channels(info, DMA_XOR); + request_channels(info, DMA_PQ); } -#endif -static void __stop_threaded_test(struct dmatest_info *info) +static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; @@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info) list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); - pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); + pr_debug("dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } info->nr_channels = 0; } -static void stop_threaded_test(struct dmatest_info *info) +static void restart_threaded_test(struct dmatest_info *info, bool run) { - mutex_lock(&info->lock); - __stop_threaded_test(info); - mutex_unlock(&info->lock); -} - -static int __restart_threaded_test(struct dmatest_info *info, bool run) -{ - struct dmatest_params *params = &info->params; + /* we might be called early to set run=, defer running until all + * parameters have been evaluated + */ + if (!info->did_init) + return; /* Stop any running test first */ - __stop_threaded_test(info); - - if (run == false) - return 0; - - /* Clear results from previous run */ - result_free(info, NULL); - - /* Copy test parameters */ - params->buf_size = test_buf_size; - strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); - strlcpy(params->device, strim(test_device), sizeof(params->device)); - params->threads_per_chan = threads_per_chan; - params->max_channels = max_channels; - params->iterations = iterations; - params->xor_sources = xor_sources; - params->pq_sources = pq_sources; 
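
The run/wait controls added in this patch hinge on module_param_cb() with custom kernel_param_ops, as seen above for "wait" and below for "run". A minimal, hedged sketch of that pattern outside dmatest, with illustrative names only:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_run;

static int demo_run_set(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_bool(val, kp);      /* parses "Y"/"N" into demo_run */

        if (ret)
                return ret;
        pr_info("demo: %s requested\n", demo_run ? "start" : "stop");
        /* start or stop the real work here */
        return 0;
}

static int demo_run_get(char *val, const struct kernel_param *kp)
{
        /* report live state instead of the last value written */
        return param_get_bool(val, kp);
}

static const struct kernel_param_ops demo_run_ops = {
        .set = demo_run_set,
        .get = demo_run_get,
};
module_param_cb(run, &demo_run_ops, &demo_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "start/stop the demo (illustrative)");
MODULE_LICENSE("GPL");
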
- params->timeout = timeout; + stop_threaded_test(info); /* Run test with new parameters */ - return __run_threaded_test(info); -} - -static bool __is_threaded_test_run(struct dmatest_info *info) -{ - struct dmatest_chan *dtc; - - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) - return true; - } - } - - return false; + run_threaded_test(info); } -static ssize_t dtf_read_run(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static int dmatest_run_get(char *val, const struct kernel_param *kp) { - struct dmatest_info *info = file->private_data; - char buf[3]; + struct dmatest_info *info = &test_info; mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) { - buf[0] = 'Y'; + if (is_threaded_test_run(info)) { + dmatest_run = true; } else { - __stop_threaded_test(info); - buf[0] = 'N'; + stop_threaded_test(info); + dmatest_run = false; } - mutex_unlock(&info->lock); - buf[1] = '\n'; - buf[2] = 0x00; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dmatest_info *info = file->private_data; - char buf[16]; - bool bv; - int ret = 0; - if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) - return -EFAULT; - - if (strtobool(buf, &bv) == 0) { - mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) - ret = -EBUSY; - else - ret = __restart_threaded_test(info, bv); - - mutex_unlock(&info->lock); - } - - return ret ? ret : count; + return param_get_bool(val, kp); } -static const struct file_operations dtf_run_fops = { - .read = dtf_read_run, - .write = dtf_write_run, - .open = simple_open, - .llseek = default_llseek, -}; - -static int dtf_results_show(struct seq_file *sf, void *data) +static int dmatest_run_set(const char *val, const struct kernel_param *kp) { - struct dmatest_info *info = sf->private; - struct dmatest_result *result; - struct dmatest_thread_result *tr; - unsigned int i; + struct dmatest_info *info = &test_info; + int ret; - mutex_lock(&info->results_lock); - list_for_each_entry(result, &info->results, node) { - list_for_each_entry(tr, &result->results, node) { - seq_printf(sf, "%s\n", - thread_result_get(result->name, tr)); - if (tr->type == DMATEST_ET_VERIFY_BUF) { - for (i = 0; i < tr->vr->error_count; i++) { - seq_printf(sf, "\t%s\n", - verify_result_get_one(tr->vr, i)); - } - } - } + mutex_lock(&info->lock); + ret = param_set_bool(val, kp); + if (ret) { + mutex_unlock(&info->lock); + return ret; } - mutex_unlock(&info->results_lock); - return 0; -} - -static int dtf_results_open(struct inode *inode, struct file *file) -{ - return single_open(file, dtf_results_show, inode->i_private); -} - -static const struct file_operations dtf_results_fops = { - .open = dtf_results_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int dmatest_register_dbgfs(struct dmatest_info *info) -{ - struct dentry *d; - - d = debugfs_create_dir("dmatest", NULL); - if (IS_ERR(d)) - return PTR_ERR(d); - if (!d) - goto err_root; + if (is_threaded_test_run(info)) + ret = -EBUSY; + else if (dmatest_run) + restart_threaded_test(info, dmatest_run); - info->root = d; - - /* Run or stop threaded test */ - debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, - &dtf_run_fops); - - /* Results of test in progress */ - debugfs_create_file("results", S_IRUGO, info->root, 
info, - &dtf_results_fops); - - return 0; + mutex_unlock(&info->lock); -err_root: - pr_err("dmatest: Failed to initialize debugfs\n"); - return -ENOMEM; + return ret; } static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; - int ret; - - memset(info, 0, sizeof(*info)); + struct dmatest_params *params = &info->params; - mutex_init(&info->lock); - INIT_LIST_HEAD(&info->channels); + if (dmatest_run) { + mutex_lock(&info->lock); + run_threaded_test(info); + mutex_unlock(&info->lock); + } - mutex_init(&info->results_lock); - INIT_LIST_HEAD(&info->results); + if (params->iterations && wait) + wait_event(thread_wait, !is_threaded_test_run(info)); - ret = dmatest_register_dbgfs(info); - if (ret) - return ret; + /* module parameters are stable, inittime tests are started, + * let userspace take over 'run' control + */ + info->did_init = true; -#ifdef MODULE return 0; -#else - return run_threaded_test(info); -#endif } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); @@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void) { struct dmatest_info *info = &test_info; - debugfs_remove_recursive(info->root); + mutex_lock(&info->lock); stop_threaded_test(info); - result_free(info, NULL); + mutex_unlock(&info->lock); } module_exit(dmatest_exit); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 89eb89f22284..7516be4677cf 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { @@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); - if (!is_slave_direction(dwc->direction)) { - struct device *parent = chan2parent(&dwc->chan); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); spin_unlock_irqrestore(&dwc->lock, flags); if (callback) @@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dwc_scan_descriptors(to_dw_dma(chan->device), dwc); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, dwc_get_residue(dwc)); if (dwc->paused && ret == DMA_IN_PROGRESS) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index bef8a368c8dd..2539ea0cbc63 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -46,14 +46,21 @@ #define EDMA_CHANS 64 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ -/* Max of 16 segments per channel to conserve PaRAM slots */ -#define MAX_NR_SG 16 +/* + * Max of 20 segments per channel to conserve PaRAM slots + * Also note that MAX_NR_SG should be atleast the no.of periods + * that are required for ASoC, otherwise 
DMA prep calls will + * fail. Today davinci-pcm is the only user of this driver and + * requires atleast 17 slots, so we setup the default to 20. + */ +#define MAX_NR_SG 20 #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 struct edma_desc { struct virt_dma_desc vdesc; struct list_head node; + int cyclic; int absync; int pset_nr; int processed; @@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan) * then setup a link to the dummy slot, this results in all future * events being absorbed and that's OK because we're done */ - if (edesc->processed == edesc->pset_nr) - edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); + if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) + edma_link(echan->slot[nslots-1], echan->slot[1]); + else + edma_link(echan->slot[nslots-1], + echan->ecc->dummy_slot); + } edma_resume(echan->ch_num); @@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return ret; } +/* + * A PaRAM set configuration abstraction used by other modes + * @chan: Channel who's PaRAM set we're configuring + * @pset: PaRAM set to initialize and setup. + * @src_addr: Source address of the DMA + * @dst_addr: Destination address of the DMA + * @burst: In units of dev_width, how much to send + * @dev_width: How much is the dev_width + * @dma_length: Total length of the DMA transfer + * @direction: Direction of the transfer + */ +static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, + dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, + enum dma_slave_buswidth dev_width, unsigned int dma_length, + enum dma_transfer_direction direction) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + int acnt, bcnt, ccnt, cidx; + int src_bidx, dst_bidx, src_cidx, dst_cidx; + int absync; + + acnt = dev_width; + /* + * If the maxburst is equal to the fifo width, use + * A-synced transfers. This allows for large contiguous + * buffer transfers using only one PaRAM set. + */ + if (burst == 1) { + /* + * For the A-sync case, bcnt and ccnt are the remainder + * and quotient respectively of the division of: + * (dma_length / acnt) by (SZ_64K -1). This is so + * that in case bcnt over flows, we have ccnt to use. + * Note: In A-sync tranfer only, bcntrld is used, but it + * only applies for sg_dma_len(sg) >= SZ_64K. + * In this case, the best way adopted is- bccnt for the + * first frame will be the remainder below. Then for + * every successive frame, bcnt will be SZ_64K-1. This + * is assured as bcntrld = 0xffff in end of function. + */ + absync = false; + ccnt = dma_length / acnt / (SZ_64K - 1); + bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); + /* + * If bcnt is non-zero, we have a remainder and hence an + * extra frame to transfer, so increment ccnt. + */ + if (bcnt) + ccnt++; + else + bcnt = SZ_64K - 1; + cidx = acnt; + } else { + /* + * If maxburst is greater than the fifo address_width, + * use AB-synced transfers where A count is the fifo + * address_width and B count is the maxburst. In this + * case, we are limited to transfers of C count frames + * of (address_width * maxburst) where C count is limited + * to SZ_64K-1. This places an upper bound on the length + * of an SG segment that can be handled. 
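
The A-sync/AB-sync choice described above is plain integer arithmetic; the standalone sketch below (userspace, illustrative widths and lengths) reproduces the ACNT/BCNT/CCNT split for both cases.

#include <stdio.h>

#define SZ_64K 0x10000

int main(void)
{
        unsigned int dev_width = 4;     /* bytes per FIFO access (acnt) */
        unsigned int burst = 8;         /* maxburst in units of dev_width */
        unsigned int dma_length = 4096; /* bytes in this segment */
        unsigned int acnt, bcnt, ccnt;

        acnt = dev_width;
        if (burst == 1) {
                /* A-sync: frames capped at SZ_64K - 1 elements */
                ccnt = dma_length / acnt / (SZ_64K - 1);
                bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
                if (bcnt)
                        ccnt++;
                else
                        bcnt = SZ_64K - 1;
        } else {
                /* AB-sync: one frame of acnt * bcnt bytes per event */
                bcnt = burst;
                ccnt = dma_length / (acnt * bcnt);
        }
        /* 4096 bytes, 4-byte width, burst 8 -> ACNT=4 BCNT=8 CCNT=128 */
        printf("ACNT=%u BCNT=%u CCNT=%u\n", acnt, bcnt, ccnt);
        return 0;
}
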
+ */ + absync = true; + bcnt = burst; + ccnt = dma_length / (acnt * bcnt); + if (ccnt > (SZ_64K - 1)) { + dev_err(dev, "Exceeded max SG segment size\n"); + return -EINVAL; + } + cidx = acnt * bcnt; + } + + if (direction == DMA_MEM_TO_DEV) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = 0; + dst_cidx = 0; + } else if (direction == DMA_DEV_TO_MEM) { + src_bidx = 0; + src_cidx = 0; + dst_bidx = acnt; + dst_cidx = cidx; + } else { + dev_err(dev, "%s: direction not implemented yet\n", __func__); + return -EINVAL; + } + + pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); + /* Configure A or AB synchronized transfers */ + if (absync) + pset->opt |= SYNCDIM; + + pset->src = src_addr; + pset->dst = dst_addr; + + pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; + pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; + + pset->a_b_cnt = bcnt << 16 | acnt; + pset->ccnt = ccnt; + /* + * Only time when (bcntrld) auto reload is required is for + * A-sync case, and in this case, a requirement of reload value + * of SZ_64K-1 only is assured. 'link' is initially set to NULL + * and then later will be populated by edma_execute. + */ + pset->link_bcntrld = 0xffffffff; + return absync; +} + static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; - dma_addr_t dev_addr; + dma_addr_t src_addr = 0, dst_addr = 0; enum dma_slave_buswidth dev_width; u32 burst; struct scatterlist *sg; - int acnt, bcnt, ccnt, src, dst, cidx; - int src_bidx, dst_bidx, src_cidx, dst_cidx; - int i, nslots; + int i, nslots, ret; if (unlikely(!echan || !sgl || !sg_len)) return NULL; if (direction == DMA_DEV_TO_MEM) { - dev_addr = echan->cfg.src_addr; + src_addr = echan->cfg.src_addr; dev_width = echan->cfg.src_addr_width; burst = echan->cfg.src_maxburst; } else if (direction == DMA_MEM_TO_DEV) { - dev_addr = echan->cfg.dst_addr; + dst_addr = echan->cfg.dst_addr; dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { @@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "Failed to allocate slot\n"); - kfree(edesc); return NULL; } } @@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* Configure PaRAM sets for each SG */ for_each_sg(sgl, sg, sg_len, i) { - - acnt = dev_width; - - /* - * If the maxburst is equal to the fifo width, use - * A-synced transfers. This allows for large contiguous - * buffer transfers using only one PaRAM set. - */ - if (burst == 1) { - edesc->absync = false; - ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); - bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); - if (bcnt) - ccnt++; - else - bcnt = SZ_64K - 1; - cidx = acnt; - /* - * If maxburst is greater than the fifo address_width, - * use AB-synced transfers where A count is the fifo - * address_width and B count is the maxburst. In this - * case, we are limited to transfers of C count frames - * of (address_width * maxburst) where C count is limited - * to SZ_64K-1. This places an upper bound on the length - * of an SG segment that can be handled. 
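
For context, the dev_width and burst values consumed in edma_prep_slave_sg() come from the client's dma_slave_config. A generic, hedged sketch of the client side of a slave-SG transfer (not part of this patch; names are illustrative):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, dma_addr_t fifo,
                       void (*done)(void *), void *arg)
{
        struct dma_slave_config cfg = {
                .direction = DMA_DEV_TO_MEM,
                .src_addr = fifo,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = 8,
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EIO;

        desc->callback = done;
        desc->callback_param = arg;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
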
- */ - } else { - edesc->absync = true; - bcnt = burst; - ccnt = sg_dma_len(sg) / (acnt * bcnt); - if (ccnt > (SZ_64K - 1)) { - dev_err(dev, "Exceeded max SG segment size\n"); - kfree(edesc); - return NULL; - } - cidx = acnt * bcnt; + /* Get address for each SG */ + if (direction == DMA_DEV_TO_MEM) + dst_addr = sg_dma_address(sg); + else + src_addr = sg_dma_address(sg); + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, + sg_dma_len(sg), direction); + if (ret < 0) { + kfree(edesc); + return NULL; } - if (direction == DMA_MEM_TO_DEV) { - src = sg_dma_address(sg); - dst = dev_addr; - src_bidx = acnt; - src_cidx = cidx; - dst_bidx = 0; - dst_cidx = 0; - } else { - src = dev_addr; - dst = sg_dma_address(sg); - src_bidx = 0; - src_cidx = 0; - dst_bidx = acnt; - dst_cidx = cidx; - } - - edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); - /* Configure A or AB synchronized transfers */ - if (edesc->absync) - edesc->pset[i].opt |= SYNCDIM; + edesc->absync = ret; /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ @@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) edesc->pset[i].opt |= TCINTEN; + } - edesc->pset[i].src = src; - edesc->pset[i].dst = dst; + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} - edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; - edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; +static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long tx_flags, void *context) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edma_desc *edesc; + dma_addr_t src_addr, dst_addr; + enum dma_slave_buswidth dev_width; + u32 burst; + int i, ret, nslots; + + if (unlikely(!echan || !buf_len || !period_len)) + return NULL; + + if (direction == DMA_DEV_TO_MEM) { + src_addr = echan->cfg.src_addr; + dst_addr = buf_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr; + dst_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + dev_err(dev, "Undefined slave buswidth\n"); + return NULL; + } + + if (unlikely(buf_len % period_len)) { + dev_err(dev, "Period should be multiple of Buffer length\n"); + return NULL; + } + + nslots = (buf_len / period_len) + 1; + + /* + * Cyclic DMA users such as audio cannot tolerate delays introduced + * by cases where the number of periods is more than the maximum + * number of SGs the EDMA driver can handle at a time. For DMA types + * such as Slave SGs, such delays are tolerable and synchronized, + * but the synchronization is difficult to achieve with Cyclic and + * cannot be guaranteed, so we error out early. 
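
The cyclic bookkeeping described above amounts to one PaRAM slot per period plus a reload slot, refused when it would exceed MAX_NR_SG. A standalone sketch with illustrative sizes:

#include <stdio.h>

#define MAX_NR_SG 20

int main(void)
{
        unsigned int buf_len = 8 * 4096; /* whole cyclic buffer */
        unsigned int period_len = 4096;  /* callback granularity */
        unsigned int nslots;

        if (buf_len % period_len) {
                printf("period must divide buffer length\n");
                return 1;
        }

        nslots = buf_len / period_len + 1; /* +1 reload copy of pset[0] */
        if (nslots > MAX_NR_SG) {
                printf("too many periods for %d PaRAM slots\n", MAX_NR_SG);
                return 1;
        }
        /* 8 periods -> 9 slots; the last slot links back to slot[1] */
        printf("periods=%u slots=%u\n", buf_len / period_len, nslots);
        return 0;
}
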
+ */ + if (nslots > MAX_NR_SG) + return NULL; + + edesc = kzalloc(sizeof(*edesc) + nslots * + sizeof(edesc->pset[0]), GFP_ATOMIC); + if (!edesc) { + dev_dbg(dev, "Failed to allocate a descriptor\n"); + return NULL; + } + + edesc->cyclic = 1; + edesc->pset_nr = nslots; + + dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); + dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); + dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); + + for (i = 0; i < nslots; i++) { + /* Allocate a PaRAM slot, if needed */ + if (echan->slot[i] < 0) { + echan->slot[i] = + edma_alloc_slot(EDMA_CTLR(echan->ch_num), + EDMA_SLOT_ANY); + if (echan->slot[i] < 0) { + dev_err(dev, "Failed to allocate slot\n"); + return NULL; + } + } + + if (i == nslots - 1) { + memcpy(&edesc->pset[i], &edesc->pset[0], + sizeof(edesc->pset[0])); + break; + } + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, period_len, + direction); + if (ret < 0) + return NULL; - edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; - edesc->pset[i].ccnt = ccnt; - edesc->pset[i].link_bcntrld = 0xffffffff; + if (direction == DMA_DEV_TO_MEM) + dst_addr += period_len; + else + src_addr += period_len; + dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); + dev_dbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + i, echan->ch_num, echan->slot[i], + edesc->pset[i].opt, + edesc->pset[i].src, + edesc->pset[i].dst, + edesc->pset[i].a_b_cnt, + edesc->pset[i].ccnt, + edesc->pset[i].src_dst_bidx, + edesc->pset[i].src_dst_cidx, + edesc->pset[i].link_bcntrld); + + edesc->absync = ret; + + /* + * Enable interrupts for every period because callback + * has to be called for every period. 
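
From the client side, a cyclic transfer with one callback per period looks roughly like the sketch below (generic dmaengine usage, not part of this patch; names are illustrative and the dmaengine_prep_dma_cyclic() prototype has varied slightly between kernel versions):

#include <linux/dmaengine.h>

static int my_start_playback(struct dma_chan *chan, dma_addr_t buf,
                             size_t buf_len, size_t period_len,
                             void (*period_elapsed)(void *), void *arg)
{
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!desc)
                return -EIO;

        desc->callback = period_elapsed;        /* invoked once per period */
        desc->callback_param = arg;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
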
+ */ + edesc->pset[i].opt |= TCINTEN; } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); @@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) unsigned long flags; struct edmacc_param p; - /* Pause the channel */ - edma_pause(echan->ch_num); + edesc = echan->edesc; + + /* Pause the channel for non-cyclic */ + if (!edesc || (edesc && !edesc->cyclic)) + edma_pause(echan->ch_num); switch (ch_status) { - case DMA_COMPLETE: + case EDMA_DMA_COMPLETE: spin_lock_irqsave(&echan->vchan.lock, flags); - edesc = echan->edesc; if (edesc) { - if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) { + vchan_cyclic_callback(&edesc->vdesc); + } else if (edesc->processed == edesc->pset_nr) { dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); edma_stop(echan->ch_num); vchan_cookie_complete(&edesc->vdesc); + edma_execute(echan); } else { dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); + edma_execute(echan); } - - edma_execute(echan); } spin_unlock_irqrestore(&echan->vchan.lock, flags); break; - case DMA_CC_ERROR: + case EDMA_DMA_CC_ERROR: spin_lock_irqsave(&echan->vchan.lock, flags); edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); @@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&echan->vchan.lock, flags); @@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, struct device *dev) { dma->device_prep_slave_sg = edma_prep_slave_sg; + dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; dma->device_alloc_chan_resources = edma_alloc_chan_resources; dma->device_free_chan_resources = edma_free_chan_resources; dma->device_issue_pending = edma_issue_pending; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 591cd8c63abb..cb4bf682a708 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) spin_unlock_irqrestore(&edmac->lock, flags); } -static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) -{ - struct device *dev = desc->txd.chan->device->dev; - - if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - } - if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - else - dma_unmap_page(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - } -} - static void ep93xx_dma_tasklet(unsigned long data) { struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; @@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data) /* Now we can release all the chained descriptors */ list_for_each_entry_safe(desc, d, &list, node) { - /* - * For the memcpy channels the API requires us to unmap the - * buffers unless requested otherwise. 
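
The open-coded unmap paths removed here and below are superseded by the generic unmap-data machinery: the submitter records its mappings in a dmaengine_unmap_data and the driver's completion path only calls dma_descriptor_unmap(tx). A rough sketch of the submitter side (illustrative helper; mapping-error handling trimmed):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
my_prep_copy(struct dma_chan *chan, struct page *dst, struct page *src,
             size_t len)
{
        struct device *dev = chan->device->dev;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *um;

        um = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
        if (!um)
                return NULL;

        um->len = len;
        um->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
        um->to_cnt = 1;
        um->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
        um->from_cnt = 1;

        tx = chan->device->device_prep_dma_memcpy(chan, um->addr[1],
                                                   um->addr[0], len,
                                                   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (tx)
                dma_set_unmap(tx, um); /* completion will dma_descriptor_unmap(tx) */

        dmaengine_unmap_put(um);       /* drop the submitter's reference */
        return tx;
}
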
- */ - if (!edmac->chan.private) - ep93xx_dma_unmap_buffers(desc); - + dma_descriptor_unmap(&desc->txd); ep93xx_dma_desc_put(edmac, desc); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 61517dd0d0b7..7086a16a55f2 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, /* Run any dependencies */ dma_run_dependencies(txd); - /* Unmap the dst buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); - else - dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); - } - - /* Unmap the src buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, src, len, DMA_TO_DEVICE); - else - dma_unmap_page(dev, src, len, DMA_TO_DEVICE); - } - + dma_descriptor_unmap(txd); #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif @@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, WARN_ON(fdev->feature != chan->feature); chan->dev = fdev->dev; - chan->id = ((res.start - 0x100) & 0xfff) >> 7; + chan->id = (res.start & 0xfff) < 0x300 ? + ((res.start - 0x100) & 0xfff) >> 7 : + ((res.start - 0x200) & 0xfff) >> 7; if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; @@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op) } static const struct of_device_id fsldma_of_ids[] = { + { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} @@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = { static __init int fsldma_init(void) { - pr_info("Freescale Elo / Elo Plus DMA driver\n"); + pr_info("Freescale Elo series DMA driver\n"); return platform_driver_register(&fsldma_of_driver); } @@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void) subsys_initcall(fsldma_init); module_exit(fsldma_exit); -MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); +MODULE_DESCRIPTION("Freescale Elo series DMA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc74..1ffc24484d23 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -112,7 +112,7 @@ struct fsldma_chan_regs { }; struct fsldma_chan; -#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 +#define FSL_DMA_MAX_CHANS_PER_DEVICE 8 struct fsldma_device { void __iomem *regs; /* DGSR register base */ diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 55852c026791..6f9ac2022abd 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " - "dma_length=%d\n", __func__, imxdmac->channel, - d->dest, d->src, d->len); + dev_dbg(imxdma->dev, + "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n", + __func__, imxdmac->channel, + (unsigned long long)d->dest, + (unsigned long long)d->src, d->len); break; /* Cyclic transfer is the same as slave_sg with special sg configuration. 
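
The dev_dbg() changes around here come down to using the right printk specifiers: %zu for size_t, and either a cast to unsigned long long with %llx or (in later kernels) the %pad extension for dma_addr_t. A minimal sketch with an illustrative device:

#include <linux/device.h>
#include <linux/types.h>

static void my_debug_xfer(struct device *dev, dma_addr_t dst, dma_addr_t src,
                          size_t len)
{
        dev_dbg(dev, "dest=0x%08llx src=0x%08llx len=%zu\n",
                (unsigned long long)dst, (unsigned long long)src, len);
        dev_dbg(dev, "dest=%pad src=%pad len=%zu\n", &dst, &src, len);
}
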
*/ @@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (dev2mem)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else if (d->direction == DMA_MEM_TO_DEV) { imx_dmav1_writel(imxdma, imxdmac->per_address, DMA_DAR(imxdmac->channel)); imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (mem2dev)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else { dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", __func__, imxdmac->channel); @@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) desc->desc.tx_submit = imxdma_tx_submit; /* txd.flags will be overwritten in prep funcs */ desc->desc.flags = DMA_CTRL_ACK; - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; list_add_tail(&desc->node, &imxdmac->ld_free); imxdmac->descs_allocated++; @@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( int i; unsigned int periods = buf_len / period_len; - dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", + dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", __func__, imxdmac->channel, buf_len, period_len); if (list_empty(&imxdmac->ld_free) || @@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", - __func__, imxdmac->channel, src, dest, len); + dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n", + __func__, imxdmac->channel, (unsigned long long)src, + (unsigned long long)dest, len); if (list_empty(&imxdmac->ld_free) || imxdma_chan_is_doing_cyclic(imxdmac)) @@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" - " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, - imxdmac->channel, xt->src_start, xt->dst_start, + dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n" + " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__, + imxdmac->channel, (unsigned long long)xt->src_start, + (unsigned long long) xt->dst_start, xt->src_sgl ? "true" : "false", xt->dst_sgl ? 
"true" : "false", xt->numf, xt->frame_size); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c1fd504cae28..c75679d42028 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) if (error) sdmac->status = DMA_ERROR; else - sdmac->status = DMA_SUCCESS; + sdmac->status = DMA_COMPLETE; dma_cookie_complete(&sdmac->desc); if (sdmac->desc.callback) @@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( param &= ~BD_CONT; } - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, count, sg->dma_address, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, count, (u64)sg->dma_address, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); @@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( if (i + 1 == num_periods) param |= BD_WRAP; - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, period_len, dma_addr, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, period_len, (u64)dma_addr, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a975ebebea8a..1aab8130efa1 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, callback_txd(param_txd); } if (midc->raw_tfr) { - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; if (desc->lli != NULL) { pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); @@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { spin_lock_bh(&midc->lock); midc_scan_descriptors(to_middma_device(chan->device), midc); spin_unlock_bh(&midc->lock); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5ff6fc1819dc..1a49c777607c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw) -{ - struct pci_dev *pdev = chan->device->pdev; - size_t offset = len - hw->size; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, hw->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) - ioat_unmap(pdev, hw->src_addr - offset, len, - PCI_DMA_TODEVICE, flags, 0); -} - dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) { dma_addr_t phys_complete; @@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) dump_desc_dbg(ioat, desc); if (tx->cookie) { dma_cookie_complete(tx); - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + dma_descriptor_unmap(tx); ioat->active -= desc->hw->tx_cnt; if (tx->callback) { tx->callback(tx->callback_param); @@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; device->cleanup_fn((unsigned long) c); @@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, 
IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_INTERRUPT; + flags = DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { @@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) if (tmo == 0 || dma->device_tx_status(dma_chan, cookie, NULL) - != DMA_SUCCESS) { + != DMA_COMPLETE) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto unmap_dma; @@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix"; module_param_string(ioat_interrupt_style, ioat_interrupt_style, sizeof(ioat_interrupt_style), 0644); MODULE_PARM_DESC(ioat_interrupt_style, - "set ioat interrupt style: msix (default), " - "msix-single-vector, msi, intx)"); + "set ioat interrupt style: msix (default), msi, intx"); /** * ioat_dma_setup_interrupts - setup interrupt handler @@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device) if (!strcmp(ioat_interrupt_style, "msix")) goto msix; - if (!strcmp(ioat_interrupt_style, "msix-single-vector")) - goto msix_single_vector; if (!strcmp(ioat_interrupt_style, "msi")) goto msi; if (!strcmp(ioat_interrupt_style, "intx")) @@ -920,10 +901,8 @@ msix: device->msix_entries[i].entry = i; err = pci_enable_msix(pdev, device->msix_entries, msixcnt); - if (err < 0) + if (err) goto msi; - if (err > 0) - goto msix_single_vector; for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; @@ -937,29 +916,13 @@ msix: chan = ioat_chan_by_index(device, j); devm_free_irq(dev, msix->vector, chan); } - goto msix_single_vector; + goto msi; } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; device->irq_mode = IOAT_MSIX; goto done; -msix_single_vector: - msix = &device->msix_entries[0]; - msix->entry = 0; - err = pci_enable_msix(pdev, device->msix_entries, 1); - if (err) - goto msi; - - err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, - "ioat-msix", device); - if (err) { - pci_disable_msix(pdev); - goto msi; - } - device->irq_mode = IOAT_MSIX_SINGLE; - goto done; - msi: err = pci_enable_msi(pdev); if (err) @@ -971,7 +934,7 @@ msi: pci_disable_msi(pdev); goto intx; } - device->irq_mode = IOAT_MSIX; + device->irq_mode = IOAT_MSI; goto done; intx: diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 54fb7b9ff9aa..11fb877ddca9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -52,7 +52,6 @@ enum ioat_irq_mode { IOAT_NOIRQ = 0, IOAT_MSIX, - IOAT_MSIX_SINGLE, IOAT_MSI, IOAT_INTX }; @@ -83,7 +82,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; #define MAX_SED_POOLS 5 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; - struct kmem_cache *sed_pool; struct dma_device common; u8 version; struct msix_entry msix_entries[4]; @@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, - int direction, enum dma_ctrl_flags flags, bool dst) -{ - if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || - (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) - pci_unmap_single(pdev, addr, len, direction); - else - pci_unmap_page(pdev, addr, len, direction); -} - int ioat_probe(struct ioatdma_device *device); int ioat_register(struct ioatdma_device *device); int ioat1_dma_probe(struct ioatdma_device *dev, int dca); @@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct 
ioat_chan_common *chan, int idx); enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, dma_addr_t *phys_complete); void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b925e1b1d139..5d3affe7e976 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; dump_desc_dbg(ioat, desc); if (tx->cookie) { - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + dma_descriptor_unmap(tx); dma_cookie_complete(tx); if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 212d584fe427..470292767e68 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) int ioat2_dma_probe(struct ioatdma_device *dev, int dca); int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -void ioat3_dma_remove(struct ioatdma_device *dev); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d8ececaf1b57..820817e97e62 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -67,6 +67,8 @@ #include "dma.h" #include "dma_v2.h" +extern struct kmem_cache *ioat3_sed_cache; + /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) @@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6 }; -/* - * technically sources 1 and 2 do not require SED, but the op will have - * at least 9 descriptors so that's irrelevant. 
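
The ioat changes around here swap the per-device SED pool for a single module-wide kmem_cache. A minimal sketch of that allocation pattern, with illustrative names only:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_sed_ent {
        void *hw;
        dma_addr_t dma;
        unsigned int hw_pool;
};

static struct kmem_cache *my_sed_cache;

static struct my_sed_ent *my_alloc_sed(void)
{
        return kmem_cache_zalloc(my_sed_cache, GFP_ATOMIC);
}

static void my_free_sed(struct my_sed_ent *sed)
{
        kmem_cache_free(my_sed_cache, sed);
}

static int __init my_init(void)
{
        my_sed_cache = KMEM_CACHE(my_sed_ent, 0);
        return my_sed_cache ? 0 : -ENOMEM;
}
module_init(my_init);

static void __exit my_exit(void)
{
        kmem_cache_destroy(my_sed_cache);
}
module_exit(my_exit);
MODULE_LICENSE("GPL");
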
- */ -static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1 }; - static void ioat3_eh(struct ioat2_dma_chan *ioat); -static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) -{ - struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; - - return raw->field[xor_idx_to_field[idx]]; -} - static void xor_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, int idx) { @@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], pq->coef[idx] = coef; } -static int sed_get_pq16_pool_idx(int src_cnt) -{ - - return pq16_idx_to_sed[src_cnt]; -} - static bool is_jf_ioat(struct pci_dev *pdev) { switch (pdev->device) { @@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) struct ioat_sed_ent *sed; gfp_t flags = __GFP_ZERO | GFP_ATOMIC; - sed = kmem_cache_alloc(device->sed_pool, flags); + sed = kmem_cache_alloc(ioat3_sed_cache, flags); if (!sed) return NULL; @@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], flags, &sed->dma); if (!sed->hw) { - kmem_cache_free(device->sed_pool, sed); + kmem_cache_free(ioat3_sed_cache, sed); return NULL; } @@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s return; dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); - kmem_cache_free(device->sed_pool, sed); -} - -static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc, int idx) -{ - struct ioat_chan_common *chan = &ioat->base; - struct pci_dev *pdev = chan->device->pdev; - size_t len = desc->len; - size_t offset = len - desc->hw->size; - struct dma_async_tx_descriptor *tx = &desc->txd; - enum dma_ctrl_flags flags = tx->flags; - - switch (desc->hw->ctl_f.op) { - case IOAT_OP_COPY: - if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ - ioat_dma_unmap(chan, flags, len, desc->hw); - break; - case IOAT_OP_XOR_VAL: - case IOAT_OP_XOR: { - struct ioat_xor_descriptor *xor = desc->xor; - struct ioat_ring_ent *ext; - struct ioat_xor_ext_descriptor *xor_ex = NULL; - int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 5) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - xor_ex = ext->xor_ex; - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) xor; - descs[1] = (struct ioat_raw_descriptor *) xor_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = xor_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* dest is a source in xor validate operations */ - if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_TODEVICE, flags, 1); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - break; - } - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ: { - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_ring_ent *ext; - struct ioat_pq_ext_descriptor *pq_ex = NULL; - int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 3) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - pq_ex = ext->pq_ex; - } - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags 
& DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) pq; - descs[1] = (struct ioat_raw_descriptor *) pq_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - case IOAT_OP_PQ_16S: - case IOAT_OP_PQ_VAL_16S: { - struct ioat_pq_descriptor *pq = desc->pq; - int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[4]; - int i; - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *)pq; - descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); - descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq16_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - default: - dev_err(&pdev->dev, "%s: unknown op type: %#x\n", - __func__, desc->hw->ctl_f.op); - } + kmem_cache_free(ioat3_sed_cache, sed); } static bool desc_has_ext(struct ioat_ring_ent *desc) @@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); - ioat3_dma_unmap(ioat, desc, idx + i); + dma_descriptor_unmap(tx); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ioat3_cleanup(ioat); @@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, u8 op; int i, s, idx, num_descs; - /* this function only handles src_cnt 9 - 16 */ - BUG_ON(src_cnt < 9); - /* this function is only called with 9-16 sources */ op = result ? 
IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; @@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, descs[0] = (struct ioat_raw_descriptor *) pq; - desc->sed = ioat3_alloc_sed(device, - sed_get_pq16_pool_idx(src_cnt)); + desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); if (!desc->sed) { dev_err(to_dev(chan), "%s: no free sed entries\n", __func__); @@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, return &desc->txd; } +static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) +{ + if (dmaf_p_disabled_continue(flags)) + return src_cnt + 1; + else if (dmaf_continue(flags)) + return src_cnt + 3; + else + return src_cnt; +} + static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef[0] = scf[0]; single_source_coef[1] = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 2, single_source_coef, len, flags) : @@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef, len, flags); } else { - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, @@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, */ *pqres = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, @@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? 
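[Editorial sketch] Two pieces of the hunk above are worth seeing in isolation: the new src_cnt_flags() helper, which replaces the "(src_cnt > 8) && (dma->max_pq > 8)" test with an effective source count that accounts for P/Q continuation, and the SED pool index now computed as (src_cnt - 2) >> 3 instead of a lookup table. The flag bits and the dmaf_*() semantics below are local stand-ins (DEMO_*), not the dmaengine definitions; src_cnt_flags() itself is copied from the hunk.

#include <stdio.h>

#define DEMO_PREP_CONTINUE	(1u << 0)	/* stand-in for DMA_PREP_CONTINUE */
#define DEMO_PREP_PQ_DISABLE_P	(1u << 1)	/* stand-in for DMA_PREP_PQ_DISABLE_P */

static int dmaf_continue(unsigned long flags)
{
	return !!(flags & DEMO_PREP_CONTINUE);
}

static int dmaf_p_disabled_continue(unsigned long flags)
{
	return (flags & (DEMO_PREP_CONTINUE | DEMO_PREP_PQ_DISABLE_P)) ==
	       (DEMO_PREP_CONTINUE | DEMO_PREP_PQ_DISABLE_P);
}

/* Logic as added by the hunk. */
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}

int main(void)
{
	unsigned int src_cnt;

	for (src_cnt = 6; src_cnt <= 12; src_cnt++) {
		int eff = src_cnt_flags(src_cnt, DEMO_PREP_CONTINUE);

		printf("src_cnt=%2u +continue -> effective %2d, path: %s",
		       src_cnt, eff, eff > 8 ? "pq16" : "pq8");
		if (eff > 8)	/* per the hunk, only the pq16 path allocates a SED */
			printf(", sed pool %d", (src_cnt - 2) >> 3);
		printf("\n");
	}
	return 0;
}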
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, @@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = pq[0]; /* specify valid address for disabled result */ - - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, scf, len, flags) : __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, @@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test xor prep failed\n"); @@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test zero prep failed\n"); err = -ENODEV; @@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } + memset(page_address(dest), 0, PAGE_SIZE); + /* test for non-zero parity sum */ op = IOAT_OP_XOR_VAL; @@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test 2nd zero prep failed\n"); err = -ENODEV; @@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) static int ioat3_irq_reinit(struct ioatdma_device *device) { - int msixcnt = device->common.chancnt; struct pci_dev *pdev = device->pdev; - int i; - struct msix_entry *msix; 
- struct ioat_chan_common *chan; - int err = 0; + int irq = pdev->irq, i; + + if (!is_bwd_ioat(pdev)) + return 0; switch (device->irq_mode) { case IOAT_MSIX: + for (i = 0; i < device->common.chancnt; i++) { + struct msix_entry *msix = &device->msix_entries[i]; + struct ioat_chan_common *chan; - for (i = 0; i < msixcnt; i++) { - msix = &device->msix_entries[i]; chan = ioat_chan_by_index(device, i); devm_free_irq(&pdev->dev, msix->vector, chan); } pci_disable_msix(pdev); break; - - case IOAT_MSIX_SINGLE: - msix = &device->msix_entries[0]; - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, msix->vector, chan); - pci_disable_msix(pdev); - break; - case IOAT_MSI: - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, pdev->irq, chan); pci_disable_msi(pdev); - break; - + /* fall through */ case IOAT_INTX: - chan = ioat_chan_by_index(device, 0); - devm_free_irq(&pdev->dev, pdev->irq, chan); + devm_free_irq(&pdev->dev, irq, device); break; - default: return 0; } - device->irq_mode = IOAT_NOIRQ; - err = ioat_dma_setup_interrupts(device); - - return err; + return ioat_dma_setup_interrupts(device); } static int ioat3_reset_hw(struct ioat_chan_common *chan) @@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) } err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); - if (err) { - dev_err(&pdev->dev, "Failed to reset!\n"); - return err; - } - - if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) + if (!err) err = ioat3_irq_reinit(device); + if (err) + dev_err(&pdev->dev, "Failed to reset: %d\n", err); + return err; } @@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) char pool_name[14]; int i; - /* allocate sw descriptor pool for SED */ - device->sed_pool = kmem_cache_create("ioat_sed", - sizeof(struct ioat_sed_ent), 0, 0, NULL); - if (!device->sed_pool) - return -ENOMEM; - for (i = 0; i < MAX_SED_POOLS; i++) { snprintf(pool_name, 14, "ioat_hw%d_sed", i); /* allocate SED DMA pool */ - device->sed_hw_pool[i] = dma_pool_create(pool_name, + device->sed_hw_pool[i] = dmam_pool_create(pool_name, &pdev->dev, SED_SIZE * (i + 1), 64, 0); if (!device->sed_hw_pool[i]) - goto sed_pool_cleanup; + return -ENOMEM; } } @@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) device->dca = ioat3_dca_init(pdev, device->reg_base); return 0; - -sed_pool_cleanup: - if (device->sed_pool) { - int i; - kmem_cache_destroy(device->sed_pool); - - for (i = 0; i < MAX_SED_POOLS; i++) - if (device->sed_hw_pool[i]) - dma_pool_destroy(device->sed_hw_pool[i]); - } - - return -ENOMEM; -} - -void ioat3_dma_remove(struct ioatdma_device *device) -{ - if (device->sed_pool) { - int i; - kmem_cache_destroy(device->sed_pool); - - for (i = 0; i < MAX_SED_POOLS; i++) - if (device->sed_hw_pool[i]) - dma_pool_destroy(device->sed_hw_pool[i]); - } } diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 2c8d560e6334..1d051cd045db 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); struct kmem_cache *ioat2_cache; +struct kmem_cache *ioat3_sed_cache; #define DRV_NAME "ioatdma" @@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; - if (device->version >= IOAT_VER_3_0) - ioat3_dma_remove(device); - dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); @@ -221,7 
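[Editorial sketch] The reworked ioat3_irq_reinit() above returns early on non-BWD parts and then tears interrupts down per mode: one devm_free_irq() per MSI-X vector, while the MSI case only disables MSI and deliberately falls through to the INTx case, since both modes share the single device IRQ. The stub functions and channel count below are illustrative; the enum names and the fall-through come from the hunk.

#include <stdio.h>

enum irq_mode { IOAT_NOIRQ, IOAT_MSIX, IOAT_MSI, IOAT_INTX };

static void free_irq_stub(const char *what) { printf("free irq: %s\n", what); }
static void disable_msix_stub(void) { printf("pci_disable_msix\n"); }
static void disable_msi_stub(void)  { printf("pci_disable_msi\n"); }

static void teardown(enum irq_mode mode, int chancnt)
{
	int i;

	switch (mode) {
	case IOAT_MSIX:
		for (i = 0; i < chancnt; i++)
			free_irq_stub("per-channel msix vector");
		disable_msix_stub();
		break;
	case IOAT_MSI:
		disable_msi_stub();
		/* fall through: MSI and INTx modes share one device IRQ */
	case IOAT_INTX:
		free_irq_stub("shared pdev->irq");
		break;
	default:
		break;
	}
}

int main(void)
{
	puts("-- MSI-X, 4 channels --"); teardown(IOAT_MSIX, 4);
	puts("-- MSI --");               teardown(IOAT_MSI, 4);
	return 0;
}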
+219,7 @@ static void ioat_remove(struct pci_dev *pdev) static int __init ioat_init_module(void) { - int err; + int err = -ENOMEM; pr_info("%s: Intel(R) QuickData Technology Driver %s\n", DRV_NAME, IOAT_DMA_VERSION); @@ -231,9 +229,21 @@ static int __init ioat_init_module(void) if (!ioat2_cache) return -ENOMEM; + ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); + if (!ioat3_sed_cache) + goto err_ioat2_cache; + err = pci_register_driver(&ioat_pci_driver); if (err) - kmem_cache_destroy(ioat2_cache); + goto err_ioat3_cache; + + return 0; + + err_ioat3_cache: + kmem_cache_destroy(ioat3_sed_cache); + + err_ioat2_cache: + kmem_cache_destroy(ioat2_cache); return err; } diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index dd8b44a56e5d..c56137bc3868 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) } } -static void -iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = iop_desc_get_dest_addr(unmap, iop_chan); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor? */ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - } - desc->group_head = NULL; -} - -static void -iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt = unmap->unmap_src_cnt; - dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); - dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); - int i; - - if (tx->flags & DMA_PREP_CONTINUE) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dma_addr_t addr; - - for (i = 0; i < src_cnt; i++) { - addr = iop_desc_get_src_addr(unmap, iop_chan, i); - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - if (desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); - dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); - } - } - - desc->group_head = NULL; -} - - static dma_cookie_t iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct iop_adma_chan *iop_chan, dma_cookie_t cookie) @@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, if (tx->callback) tx->callback(tx->callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
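[Editorial sketch] The ioat_init_module() hunk above switches to the usual goto-based unwind: each failure jumps to a label that releases everything acquired so far, in reverse order, now that there are two caches plus the driver registration to undo. The resource names and error values in the model below are illustrative only.

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name, int ok)
{
	printf("acquire %s: %s\n", name, ok ? "ok" : "fail");
	return ok ? malloc(1) : NULL;
}

static int demo_init(int have_a, int have_b, int register_ok)
{
	void *cache_a, *cache_b;
	int err = -12;				/* stands in for -ENOMEM */

	cache_a = acquire("ioat2_cache (stand-in)", have_a);
	if (!cache_a)
		return err;

	cache_b = acquire("ioat3_sed_cache (stand-in)", have_b);
	if (!cache_b)
		goto err_cache_a;

	if (!register_ok) {
		err = -19;			/* pretend driver registration failed */
		goto err_cache_b;
	}
	return 0;

 err_cache_b:
	free(cache_b);				/* undo in reverse order */
 err_cache_a:
	free(cache_a);
	return err;
}

int main(void)
{
	printf("-> %d\n", demo_init(1, 1, 0));	/* registration failure unwinds both caches */
	return 0;
}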
- */ - if (desc->group_head && desc->unmap_len) { - if (iop_desc_is_pq(desc)) - iop_desc_unmap_pq(iop_chan, desc); - else - iop_desc_unmap(iop_chan, desc); - } + dma_descriptor_unmap(tx); + if (desc->group_head) + desc->group_head = NULL; } /* run dependent operations */ @@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) if (sw_desc) { grp_start = sw_desc->group_head; iop_desc_init_interrupt(grp_start, iop_chan); - grp_start->unmap_len = 0; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); iop_desc_set_memcpy_src_addr(grp_start, dma_src); - sw_desc->unmap_src_cnt = 1; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_init_xor(grp_start, src_cnt, flags); iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_xor_src_addr(grp_start, src_cnt, @@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, grp_start->xor_check_result = result; pr_debug("\t%s: grp_start->xor_check_result: %p\n", __func__, grp_start->xor_check_result); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, @@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, dst[0] = dst[1] & 0x7; iop_desc_set_pq_addr(g, dst); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; for (i = 0; i < src_cnt; i++) iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); @@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, g->pq_check_result = pqres; pr_debug("\t%s: g->pq_check_result: %p\n", __func__, g->pq_check_result); - sw_desc->unmap_src_cnt = src_cnt+2; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, @@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, int ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; iop_adma_slot_cleanup(iop_chan); @@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) msleep(1); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test zero sum timed out, disabling\n"); err = -ENODEV; @@ -1158,7 +1067,7 @@ 
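[Editorial sketch] iop_adma_status() above keeps the common tx_status shape while only the completion code is renamed (DMA_SUCCESS becomes DMA_COMPLETE): return early if the cookie already completed, otherwise run the driver's slot cleanup and look again. The demo_* names below are stand-ins for dma_cookie_status() and the driver cleanup, used only to show the two-step check.

#include <stdio.h>

enum demo_status { DEMO_COMPLETE, DEMO_IN_PROGRESS, DEMO_ERROR };

static int hw_done;	/* pretend hardware completion flag */
static int cookie_done;	/* what the cookie bookkeeping has recorded so far */

static enum demo_status demo_cookie_status(void)
{
	return cookie_done ? DEMO_COMPLETE : DEMO_IN_PROGRESS;
}

static void demo_slot_cleanup(void)
{
	if (hw_done)
		cookie_done = 1;	/* cleanup advances the completed cookie */
}

static enum demo_status demo_tx_status(void)
{
	enum demo_status ret = demo_cookie_status();

	if (ret == DEMO_COMPLETE)
		return ret;		/* fast path, nothing left to do */
	demo_slot_cleanup();
	return demo_cookie_status();
}

int main(void)
{
	hw_done = 1;			/* hardware finished, bookkeeping lagging behind */
	printf("status = %d (0 == complete)\n", demo_tx_status());
	return 0;
}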
iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test non-zero sum timed out, disabling\n"); err = -ENODEV; @@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb9c0bc317e8..128ca143486d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); descnew = desc; - dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", - irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); + dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n", + irq, (u64)sg_dma_address(*sg), + sgnext ? 
(u64)sg_dma_address(sgnext) : 0, + ichan->active_buffer, curbuf); /* Find the descriptor of sgnext */ sgnew = idmac_sg_next(ichan, &descnew, *sg); diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a2c330f5f952..e26075408e9b 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&c->vc.lock, flags); @@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op) irq = platform_get_irq(op, 0); ret = devm_request_irq(&op->dev, irq, - k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); + k3_dma_int_handler, 0, DRIVER_NAME, d); if (ret) return ret; diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index ff8d7827f8cb..8869500ab92b 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data) * move the descriptors to a temporary list so we can drop * the lock during the entire cleanup operation */ - list_del(&desc->node); - list_add(&desc->node, &chain_cleanup); + list_move(&desc->node, &chain_cleanup); /* * Look for the first list entry which has the ENDIRQEN flag @@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, if (irq) { ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); + mmp_pdma_chan_handler, 0, "pdma", phy); if (ret) { dev_err(pdev->dev, "channel request irq fail!\n"); return ret; @@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op) /* all chan share one irq, demux inside */ irq = platform_get_irq(op, 0); ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); + mmp_pdma_int_handler, 0, "pdma", pdev); if (ret) return ret; } @@ -1018,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op) } } + platform_set_drvdata(op, pdev); dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); return 0; } diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index d3b6358e5a27..3ddacc14a736 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -62,6 +62,11 @@ #define TDCR_BURSTSZ_16B (0x3 << 6) #define TDCR_BURSTSZ_32B (0x6 << 6) #define TDCR_BURSTSZ_64B (0x7 << 6) +#define TDCR_BURSTSZ_SQU_1B (0x5 << 6) +#define TDCR_BURSTSZ_SQU_2B (0x6 << 6) +#define TDCR_BURSTSZ_SQU_4B (0x0 << 6) +#define TDCR_BURSTSZ_SQU_8B (0x1 << 6) +#define TDCR_BURSTSZ_SQU_16B (0x3 << 6) #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) #define TDCR_BURSTSZ_128B (0x5 << 6) #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ @@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) /* disable irq */ writel(0, tdmac->reg_base + TDIMR); - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; } static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) @@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) return -EINVAL; } } else if (tdmac->type == PXA910_SQU) { - tdcr |= TDCR_BURSTSZ_SQU_32B; tdcr |= TDCR_SSPMOD; + + switch (tdmac->burst_sz) { + case 1: + tdcr |= TDCR_BURSTSZ_SQU_1B; + break; + case 2: + tdcr |= TDCR_BURSTSZ_SQU_2B; + break; + case 4: + tdcr |= TDCR_BURSTSZ_SQU_4B; + break; + case 8: + tdcr |= TDCR_BURSTSZ_SQU_8B; + break; + case 16: + tdcr |= TDCR_BURSTSZ_SQU_16B; + break; + case 32: + tdcr |= TDCR_BURSTSZ_SQU_32B; + break; + default: + 
dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); + return -EINVAL; + } } writel(tdcr, tdmac->reg_base + TDCR); @@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) if (tdmac->irq) { ret = devm_request_irq(tdmac->dev, tdmac->irq, - mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); + mmp_tdma_chan_handler, 0, "tdma", tdmac); if (ret) return ret; } @@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( int num_periods = buf_len / period_len; int i = 0, buf = 0; - if (tdmac->status != DMA_SUCCESS) + if (tdmac->status != DMA_COMPLETE) return NULL; if (period_len > TDMA_MAX_XFER_BYTES) { @@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->idx = idx; tdmac->type = type; tdmac->reg_base = (unsigned long)tdev->base + idx * 4; - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); @@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) if (irq_num != chan_num) { irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, - mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); + mmp_tdma_int_handler, 0, "tdma", tdev); if (ret) return ret; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 536dcb8ba5fd..7807f0ef4e20 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) return hw_desc->phy_dest_addr; } -static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, - int src_idx) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; -} - - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { @@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, desc->async_tx.callback( desc->async_tx.callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) - */ - if (desc->group_head && desc->unmap_len) { - struct mv_xor_desc_slot *unmap = desc->group_head; - struct device *dev = mv_chan_to_devp(mv_chan); - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = desc->async_tx.flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = mv_desc_get_dest_addr(unmap); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor ? 
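[Editorial sketch] The mmp_tdma hunk completed above stops hard-wiring TDCR_BURSTSZ_SQU_32B for PXA910_SQU and instead selects the field from the configured burst size, rejecting anything unknown. The register values below are copied from the TDCR_BURSTSZ_SQU_* defines added earlier in the diff; the function name and the -22 return are illustrative.

#include <stdio.h>

#define TDCR_BURSTSZ_SQU_1B	(0x5 << 6)
#define TDCR_BURSTSZ_SQU_2B	(0x6 << 6)
#define TDCR_BURSTSZ_SQU_4B	(0x0 << 6)
#define TDCR_BURSTSZ_SQU_8B	(0x1 << 6)
#define TDCR_BURSTSZ_SQU_16B	(0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)

static int squ_burst_bits(unsigned int burst_sz, unsigned int *tdcr)
{
	switch (burst_sz) {
	case 1:  *tdcr |= TDCR_BURSTSZ_SQU_1B;  break;
	case 2:  *tdcr |= TDCR_BURSTSZ_SQU_2B;  break;
	case 4:  *tdcr |= TDCR_BURSTSZ_SQU_4B;  break;
	case 8:  *tdcr |= TDCR_BURSTSZ_SQU_8B;  break;
	case 16: *tdcr |= TDCR_BURSTSZ_SQU_16B; break;
	case 32: *tdcr |= TDCR_BURSTSZ_SQU_32B; break;
	default: return -22;	/* unknown burst size, as the new default case does */
	}
	return 0;
}

int main(void)
{
	unsigned int tdcr = 0;

	if (!squ_burst_bits(16, &tdcr))
		printf("burst 16 -> TDCR bits %#x\n", tdcr);
	printf("burst 3 -> %d (expect -22)\n", squ_burst_bits(3, &tdcr));
	return 0;
}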
*/ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = mv_desc_get_src_addr(unmap, - src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, - DMA_TO_DEVICE); - } - } + dma_descriptor_unmap(&desc->async_tx); + if (desc->group_head) desc->group_head = NULL; - } } /* run dependent operations */ @@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) { + if (ret == DMA_COMPLETE) { mv_xor_clean_completed_slots(mv_chan); return ret; } @@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, } mv_chan->mmr_base = xordev->xor_base; - if (!mv_chan->mmr_base) { - ret = -ENOMEM; - goto err_free_dma; - } + mv_chan->mmr_high_base = xordev->xor_high_base; tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); @@ -1138,7 +1094,7 @@ static void mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, const struct mbus_dram_target_info *dram) { - void __iomem *base = xordev->xor_base; + void __iomem *base = xordev->xor_high_base; u32 win_enable = 0; int i; diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06b067f24c9b..d0749229c875 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -34,13 +34,13 @@ #define XOR_OPERATION_MODE_MEMCPY 2 #define XOR_DESCRIPTOR_SWAP BIT(14) -#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) -#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) -#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) -#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) -#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) -#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) -#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) +#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) +#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) +#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) +#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4)) +#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4)) +#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0) +#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4) #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) @@ -50,11 +50,11 @@ #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) #define XOR_INTR_MASK_VALUE 0x3F5 -#define WINDOW_BASE(w) (0x250 + ((w) << 2)) -#define WINDOW_SIZE(w) (0x270 + ((w) << 2)) -#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) -#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) -#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) +#define WINDOW_BASE(w) (0x50 + ((w) << 2)) 
+#define WINDOW_SIZE(w) (0x70 + ((w) << 2)) +#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2)) +#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2)) +#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2)) struct mv_xor_device { void __iomem *xor_base; @@ -82,6 +82,7 @@ struct mv_xor_chan { int pending; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; + void __iomem *mmr_high_base; unsigned int idx; int irq; enum dma_transaction_type current_type; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ccd13df841db..ead491346da7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> +#include <linux/list.h> #include <asm/irq.h> @@ -57,6 +58,9 @@ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) #define HW_APBHX_CHn_SEMA(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) +#define HW_APBHX_CHn_BAR(d, n) \ + (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) +#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) /* * ccw bits definitions @@ -115,7 +119,9 @@ struct mxs_dma_chan { int desc_count; enum dma_status status; unsigned int flags; + bool reset; #define MXS_DMA_SG_LOOP (1 << 0) +#define MXS_DMA_USE_SEMAPHORE (1 << 1) }; #define MXS_DMA_CHANNELS 16 @@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; - if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) + /* + * mxs dma channel resets can cause a channel stall. To recover from a + * channel stall, we have to reset the whole DMA engine. To avoid this, + * we use cyclic DMA with semaphores, that are enhanced in + * mxs_dma_int_handler. To reset the channel, we can simply stop writing + * into the semaphore counter. + */ + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->reset = true; + } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); - else + } else { + unsigned long elapsed = 0; + const unsigned long max_wait = 50000; /* 50ms */ + void __iomem *reg_dbg1 = mxs_dma->base + + HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); + + /* + * On i.MX28 APBX, the DMA channel can stop working if we reset + * the channel while it is in READ_FLUSH (0x08) state. + * We wait here until we leave the state. Then we trigger the + * reset. Waiting a maximum of 50ms, the kernel shouldn't crash + * because of this. 
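[Editorial sketch] Every mv_xor macro rebased above moves from mmr_base to the new mmr_high_base and drops exactly 0x200 from its offset. That only keeps the programmed address the same if the high-base window sits 0x200 above the low base, which is an assumption inferred from the offset arithmetic, not something the hunk states; on hardware they are two separate resources. The check below makes the macros take their base explicitly so the old and new forms can be compared; the example addresses are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define OLD_XOR_CURR_DESC(base, idx)       ((base) + 0x210 + ((idx) * 4))
#define NEW_XOR_CURR_DESC(high_base, idx)  ((high_base) + 0x10 + ((idx) * 4))
#define OLD_WINDOW_BASE(base, w)           ((base) + 0x250 + ((w) << 2))
#define NEW_WINDOW_BASE(high_base, w)      ((high_base) + 0x50 + ((w) << 2))

int main(void)
{
	uintptr_t base = 0xf1060000;		/* hypothetical low base */
	uintptr_t high = base + 0x200;		/* assumed layout, see note above */
	int idx, w;

	for (idx = 0; idx < 2; idx++)
		printf("XOR_CURR_DESC ch%d: old %#lx new %#lx\n", idx,
		       (unsigned long)OLD_XOR_CURR_DESC(base, idx),
		       (unsigned long)NEW_XOR_CURR_DESC(high, idx));
	for (w = 0; w < 2; w++)
		printf("WINDOW_BASE %d:   old %#lx new %#lx\n", w,
		       (unsigned long)OLD_WINDOW_BASE(base, w),
		       (unsigned long)NEW_WINDOW_BASE(high, w));
	return 0;
}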
+ */ + while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { + udelay(100); + elapsed += 100; + } + + if (elapsed >= max_wait) + dev_err(&mxs_chan->mxs_dma->pdev->dev, + "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n", + chan_id); + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); + } + + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) @@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); /* write 1 to SEMA to kick off the channel */ - writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + /* A cyclic DMA consists of at least 2 segments, so initialize + * the semaphore with 2 so we have enough time to add 1 to the + * semaphore if we need to */ + writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } else { + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } + mxs_chan->reset = false; } static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) { - mxs_chan->status = DMA_SUCCESS; + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) @@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data) mxs_chan->desc.callback(mxs_chan->desc.callback_param); } +static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) +{ + int i; + + for (i = 0; i != mxs_dma->nr_channels; ++i) + if (mxs_dma->mxs_chans[i].chan_irq == irq) + return i; + + return -EINVAL; +} + static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) { struct mxs_dma_engine *mxs_dma = dev_id; - u32 stat1, stat2; + struct mxs_dma_chan *mxs_chan; + u32 completed; + u32 err; + int chan = mxs_dma_irq_to_chan(mxs_dma, irq); + + if (chan < 0) + return IRQ_NONE; /* completion status */ - stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); - stat1 &= MXS_DMA_CHANNELS_MASK; - writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); + completed = readl(mxs_dma->base + HW_APBHX_CTRL1); + completed = (completed >> chan) & 0x1; + + /* Clear interrupt */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); /* error status */ - stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); - writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); + err = readl(mxs_dma->base + HW_APBHX_CTRL2); + err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); + + /* + * error status bit is in the upper 16 bits, error irq bit in the lower + * 16 bits. We transform it into a simpler error code: + * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR + */ + err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); + + /* Clear error irq */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); /* * When both completion and error of termination bits set at the * same time, we do not take it as an error. IOW, it only becomes - * an error we need to handle here in case of either it's (1) a bus - * error or (2) a termination error with no completion. + * an error we need to handle here in case of either it's a bus + * error or a termination error with no completion. 0x01 is termination + * error, so we can subtract err & completed to get the real error case. 
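[Editorial sketch] The mxs_dma_reset_chan() hunk above adds a guard for i.MX28 APBX: poll the channel's DEBUG1 state nibble and only issue the reset once it has left READ_FLUSH (0x8), giving up after 50 ms. The model below keeps the loop structure and the 50 ms budget from the hunk; the register read and delay are stubbed.

#include <stdio.h>

#define READ_FLUSH_STATE 0x8

static unsigned int fake_debug1;	/* pretend DEBUG1 register */

static void udelay_stub(unsigned int us)
{
	(void)us;
	if (fake_debug1 == READ_FLUSH_STATE)
		fake_debug1 = 0x0;	/* pretend the channel eventually drains */
}

static int wait_then_reset(void)
{
	unsigned long elapsed = 0;
	const unsigned long max_wait = 50000;	/* 50 ms, as in the hunk */

	while ((fake_debug1 & 0xf) == READ_FLUSH_STATE && elapsed < max_wait) {
		udelay_stub(100);
		elapsed += 100;
	}
	if (elapsed >= max_wait) {
		printf("still in READ_FLUSH after 50 ms, resetting anyway\n");
		return -1;
	}
	printf("left READ_FLUSH after %lu us, safe to reset the channel\n", elapsed);
	return 0;
}

int main(void)
{
	fake_debug1 = READ_FLUSH_STATE;
	return wait_then_reset() ? 1 : 0;
}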
*/ - stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ - (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ - - /* combine error and completion status for checking */ - stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; - while (stat1) { - int channel = fls(stat1) - 1; - struct mxs_dma_chan *mxs_chan = - &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; - - if (channel >= MXS_DMA_CHANNELS) { - dev_dbg(mxs_dma->dma_device.dev, - "%s: error in channel %d\n", __func__, - channel - MXS_DMA_CHANNELS); - mxs_chan->status = DMA_ERROR; - mxs_dma_reset_chan(mxs_chan); - } else { - if (mxs_chan->flags & MXS_DMA_SG_LOOP) - mxs_chan->status = DMA_IN_PROGRESS; - else - mxs_chan->status = DMA_SUCCESS; - } + err -= err & completed; - stat1 &= ~(1 << channel); + mxs_chan = &mxs_dma->mxs_chans[chan]; - if (mxs_chan->status == DMA_SUCCESS) - dma_cookie_complete(&mxs_chan->desc); + if (err) { + dev_dbg(mxs_dma->dma_device.dev, + "%s: error in channel %d\n", __func__, + chan); + mxs_chan->status = DMA_ERROR; + mxs_dma_reset_chan(mxs_chan); + } else if (mxs_chan->status != DMA_COMPLETE) { + if (mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->status = DMA_IN_PROGRESS; + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) + writel(1, mxs_dma->base + + HW_APBHX_CHn_SEMA(mxs_dma, chan)); + } else { + mxs_chan->status = DMA_COMPLETE; + } + } - /* schedule tasklet on this channel */ - tasklet_schedule(&mxs_chan->tasklet); + if (mxs_chan->status == DMA_COMPLETE) { + if (mxs_chan->reset) + return IRQ_HANDLED; + dma_cookie_complete(&mxs_chan->desc); } + /* schedule tasklet on this channel */ + tasklet_schedule(&mxs_chan->tasklet); + return IRQ_HANDLED; } @@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( mxs_chan->status = DMA_IN_PROGRESS; mxs_chan->flags |= MXS_DMA_SG_LOOP; + mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; if (num_periods > NUM_CCW) { dev_err(mxs_dma->dma_device.dev, @@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( ccw->bits |= CCW_IRQ; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= CCW_DEC_SEM; ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 
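[Editorial sketch] The per-channel interrupt handler above keeps the old rule that a termination error arriving together with a completion is not treated as an error, now expressed as the single line "err -= err & completed". Using the simplified error encoding the in-diff comment describes (0 = none, 1 = TERMINATION, 2 = BUS_ERROR) and completed being the channel's completion bit, the table below shows what that line does; it deliberately models the encoded values, not the raw register arithmetic.

#include <stdio.h>

int main(void)
{
	static const char *const names[] = { "none", "TERMINATION", "BUS_ERROR" };
	unsigned int err, completed;

	for (err = 0; err <= 2; err++)
		for (completed = 0; completed <= 1; completed++) {
			unsigned int e = err;

			e -= e & completed;	/* the hunk's filtering step */
			printf("err=%-11s completed=%u -> %s\n",
			       names[err], completed,
			       e ? "handle as error" : "no error");
		}
	return 0;
}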
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); @@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + u32 residue = 0; + + if (mxs_chan->status == DMA_IN_PROGRESS && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + struct mxs_dma_ccw *last_ccw; + u32 bar; + + last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1]; + residue = last_ccw->xfer_bytes + last_ccw->bufaddr; + + bar = readl(mxs_dma->base + + HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id)); + residue -= bar; + } - dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); + dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, + residue); return mxs_chan->status; } diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ec3fc4fd9160..2f66cf4e54fe 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&c->vc.lock, flags); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index df8b10fd1726..cdf0483b8f2d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data) list_move_tail(&desc->node, &pch->dmac->desc_pool); } + dma_descriptor_unmap(&desc->txd); + if (callback) { spin_unlock_irqrestore(&pch->lock, flags); callback(callback_param); @@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param) return false; peri_id = chan->private; - return *peri_id == (unsigned)param; + return *peri_id == (unsigned long)param; } EXPORT_SYMBOL(pl330_filter); @@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) amba_set_drvdata(adev, pdmac); - irq = adev->irq[0]; - ret = request_irq(irq, pl330_irq_handler, 0, - dev_name(&adev->dev), pi); - if (ret) - return ret; + for (i = 0; i < AMBA_NR_IRQS; i++) { + irq = adev->irq[i]; + if (irq) { + ret = devm_request_irq(&adev->dev, irq, + pl330_irq_handler, 0, + dev_name(&adev->dev), pi); + if (ret) + return ret; + } else { + break; + } + } pi->pcfg.periph_id = adev->periphid; ret = pl330_add(pi); if (ret) - goto probe_err1; + return ret; INIT_LIST_HEAD(&pdmac->desc_pool); spin_lock_init(&pdmac->pool_lock); @@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return 0; probe_err3: - amba_set_drvdata(adev, NULL); - /* Idle the DMAC */ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, chan.device_node) { @@ -3048,8 +3055,6 @@ probe_err3: } probe_err2: pl330_del(pi); -probe_err1: - free_irq(irq, pi); return ret; } @@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev) struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; struct pl330_info *pi; - int irq; if (!pdmac) return 0; @@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev) of_dma_controller_free(adev->dev.of_node); dma_async_device_unregister(&pdmac->ddma); - amba_set_drvdata(adev, NULL); /* Idle the DMAC */ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, @@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev) pl330_del(pi); - irq = adev->irq[0]; - free_irq(irq, pi); - return 0; } diff --git a/drivers/dma/ppc4xx/adma.c 
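[Editorial sketch] mxs_dma_tx_status() above now reports a real residue for in-flight cyclic transfers: the end address of the last CCW (bufaddr + xfer_bytes) minus the channel's current bus address read from the new HW_APBHX_CHn_BAR register. The struct name and the numbers below are made up purely to exercise that arithmetic.

#include <stdio.h>
#include <stdint.h>

struct demo_ccw {
	uint32_t bufaddr;	/* start of the buffer this CCW transfers */
	uint32_t xfer_bytes;	/* length of that transfer */
};

static uint32_t cyclic_residue(const struct demo_ccw *last_ccw, uint32_t bar)
{
	uint32_t residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

	return residue - bar;	/* bytes the controller has not yet reached */
}

int main(void)
{
	struct demo_ccw last = { .bufaddr = 0x40000000, .xfer_bytes = 0x1000 };
	uint32_t bar = 0x40000400;	/* controller is 0x400 bytes into the buffer */

	printf("residue = %#x bytes\n", cyclic_residue(&last, bar));	/* expect 0xc00 */
	return 0;
}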
b/drivers/dma/ppc4xx/adma.c index e24b5ef486b5..8da48c6b2a38 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, } /** - * ppc440spe_desc_get_src_addr - extract the source address from the descriptor - */ -static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int src_idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - /* May have 0, 1, 2, or 3 sources */ - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - if (unlikely(src_idx)) { - printk(KERN_ERR "%s: try to get %d source for" - " DCHECK128\n", __func__, src_idx); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case DMA_CDB_OPC_MULTICAST: - case DMA_CDB_OPC_MV_SG1_SG2: - if (unlikely(src_idx > 2)) { - printk(KERN_ERR "%s: try to get %d source from" - " DMA descr\n", __func__, src_idx); - BUG(); - } - if (src_idx) { - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - u8 region; - - if (src_idx == 1) - return le32_to_cpu( - dma_hw_desc->sg1l) + - desc->unmap_len; - - region = (le32_to_cpu( - dma_hw_desc->sg1u)) >> - DMA_CUED_REGION_OFF; - - region &= DMA_CUED_REGION_MSK; - switch (region) { - case DMA_RXOR123: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 1); - case DMA_RXOR124: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len * 3); - case DMA_RXOR125: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 2); - default: - printk(KERN_ERR - "%s: try to" - " get src3 for region %02x" - "PPC440SPE_DESC_RXOR12?\n", - __func__, region); - BUG(); - } - } else { - printk(KERN_ERR - "%s: try to get %d" - " source for non-cued descr\n", - __func__, src_idx); - BUG(); - } - } - return le32_to_cpu(dma_hw_desc->sg1l); - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case PPC440SPE_XOR_ID: - /* May have up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->ops[src_idx].l; - } - return 0; -} - -/** - * ppc440spe_desc_get_dest_addr - extract the destination address from the - * descriptor - */ -static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - if (likely(!idx)) - return le32_to_cpu(dma_hw_desc->sg2l); - return le32_to_cpu(dma_hw_desc->sg3l); - case PPC440SPE_XOR_ID: - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbtal; - } - return 0; -} - -/** - * ppc440spe_desc_get_src_num - extract the number of source addresses from - * the descriptor - */ -static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - return 1; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_MULTICAST: - /* - * 
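[Editorial sketch] The ppc440spe_desc_get_src_addr() getter removed above had to reconstruct RXOR source addresses from the first source plus a region-dependent multiple of the transfer length; the multipliers in the model below are copied from that removed code, while the DEMO_ enum names stand in for DMA_RXOR123/124/125. Keeping a small model of it shows the kind of per-driver bookkeeping that moving unmap into the dmaengine core (dma_descriptor_unmap) makes unnecessary.

#include <stdio.h>
#include <stdint.h>

enum rxor_region { DEMO_RXOR123, DEMO_RXOR124, DEMO_RXOR125 };

static uint32_t rxor_src_addr(uint32_t sg1l, uint32_t unmap_len,
			      int src_idx, enum rxor_region region)
{
	if (src_idx == 0)
		return sg1l;			/* first source is stored directly */
	if (src_idx == 1)
		return sg1l + unmap_len;	/* second source follows it */

	switch (region) {			/* third source depends on the RXOR window */
	case DEMO_RXOR123: return sg1l + (unmap_len << 1);
	case DEMO_RXOR124: return sg1l + unmap_len * 3;
	case DEMO_RXOR125: return sg1l + (unmap_len << 2);
	}
	return 0;
}

int main(void)
{
	uint32_t base = 0x10000000, len = 0x1000;
	int i;

	for (i = 0; i < 3; i++)
		printf("src%d (RXOR123): %#x\n", i,
		       rxor_src_addr(base, len, i, DEMO_RXOR123));
	return 0;
}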
Only for RXOR operations we have more than - * one source - */ - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - /* RXOR op, there are 2 or 3 sources */ - if (((le32_to_cpu(dma_hw_desc->sg1u) >> - DMA_CUED_REGION_OFF) & - DMA_CUED_REGION_MSK) == DMA_RXOR12) { - /* RXOR 1-2 */ - return 2; - } else { - /* RXOR 1-2-3/1-2-4/1-2-5 */ - return 3; - } - } - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; - default: - BUG(); - } - return 0; -} - -/** - * ppc440spe_desc_get_dst_num - get the number of destination addresses in - * this descriptor - */ -static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - /* May be 1 or 2 destinations */ - dma_hw_desc = desc->hw_desc; - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DCHECK128: - return 0; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_DFILL128: - return 1; - case DMA_CDB_OPC_MULTICAST: - if (desc->dst_cnt == 2) - return 2; - else - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* Always only 1 destination */ - return 1; - default: - BUG(); - } - return 0; -} - -/** * ppc440spe_desc_get_link - get the address of the descriptor that * follows this one */ @@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, } } -static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, - struct ppc440spe_adma_desc_slot *desc) -{ - u32 src_cnt, dst_cnt; - dma_addr_t addr; - - /* - * get the number of sources & destination - * included in this descriptor and unmap - * them all - */ - src_cnt = ppc440spe_desc_get_src_num(desc, chan); - dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); - - /* unmap destinations */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - while (dst_cnt--) { - addr = ppc440spe_desc_get_dest_addr( - desc, chan, dst_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_FROM_DEVICE); - } - } - - /* unmap sources */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = ppc440spe_desc_get_src_addr( - desc, chan, src_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_TO_DEVICE); - } - } -} - /** * ppc440spe_adma_run_tx_complete_actions - call functions to be called * upon completion @@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( desc->async_tx.callback( desc->async_tx.callback_param); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- * - * actually, ppc's dma_unmap_page() functions are empty, so - * the following code is just for the sake of completeness - */ - if (chan && chan->needs_unmap && desc->group_head && - desc->unmap_len) { - struct ppc440spe_adma_desc_slot *unmap = - desc->group_head; - /* assume 1 slot per op always */ - u32 slot_count = unmap->slot_cnt; - - /* Run through the group list and unmap addresses */ - for (i = 0; i < slot_count; i++) { - BUG_ON(!unmap); - ppc440spe_adma_unmap(chan, unmap); - unmap = unmap->hw_next; - } - } + dma_descriptor_unmap(&desc->async_tx); } /* run dependent operations */ @@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, ppc440spe_chan = to_ppc440spe_adma_chan(chan); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ppc440spe_adma_slot_cleanup(ppc440spe_chan); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 4cb127978636..4eddedb6eb7d 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -628,42 +628,13 @@ retry: s3cchan->state = S3C24XX_DMA_CHAN_IDLE; } -static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd) -{ - struct device *dev = txd->vd.tx.chan->device->dev; - struct s3c24xx_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) { struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); if (!s3cchan->slave) - s3c24xx_dma_unmap_buffers(txd); + dma_descriptor_unmap(&vd->tx); s3c24xx_dma_free_txd(txd); } @@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&s3cchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) { + if (ret == DMA_COMPLETE) { spin_unlock_irqrestore(&s3cchan->vc.lock, flags); return ret; } diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 461a91ab70bb..ab26d46bbe15 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; if (!state) diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index ebad84591a6e..3083d901a414 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -60,6 +60,7 @@ #define HPB_DMAE_DSTPR_DMSTP BIT(0) /* DMA status register (DSTSR) bits */ +#define HPB_DMAE_DSTSR_DQSTS BIT(2) #define HPB_DMAE_DSTSR_DMSTS BIT(0) /* DMA common registers */ @@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan) ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR); ch_reg_write(chan, 
HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR); + + chan->plane_idx = 0; + chan->first_desc = true; } static const struct hpb_dmae_slave_config * @@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan) struct hpb_dmae_chan *chan = to_chan(schan); u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR); - return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS; + if (chan->xfer_mode == XFER_DOUBLE) + return dstsr & HPB_DMAE_DSTSR_DQSTS; + else + return dstsr & HPB_DMAE_DSTSR_DMSTS; } static int @@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) } schan = &new_hpb_chan->shdma_chan; + schan->max_xfer_len = HPB_DMA_TCR_MAX; + shdma_chan_probe(sdev, schan, id); if (pdev->id >= 0) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index d94ab592cc1b..2e7b394def80 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan, * If we don't find cookie on the queue, it has been aborted and we have * to report error */ - if (status != DMA_SUCCESS) { + if (status != DMA_COMPLETE) { struct shdma_desc *sdesc; status = DMA_ERROR; list_for_each_entry(sdesc, &schan->ld_queue, node) diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 1069e8869f20..0d765c0e21ec 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); static int sh_dmae_probe(struct platform_device *pdev) { const struct sh_dmae_pdata *pdata; - unsigned long irqflags = IRQF_DISABLED, + unsigned long irqflags = 0, chan_flag[SH_DMAE_MAX_CHANNELS] = {}; int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; @@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev) IORESOURCE_IRQ_SHAREABLE) chan_flag[irq_cnt] = IRQF_SHARED; else - chan_flag[irq_cnt] = IRQF_DISABLED; + chan_flag[irq_cnt] = 0; dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 82d2b97ad942..b8c031b7de4e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/log2.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/err.h> @@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, } ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, stedma40_residue(chan)); if (d40_is_paused(d40c)) @@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || - ((src_addr_width > 1) && (src_addr_width & 1)) || - ((dst_addr_width > 1) && (dst_addr_width & 1))) + !is_power_of_2(src_addr_width) || + !is_power_of_2(dst_addr_width)) return -EINVAL; cfg->src_info.data_width = src_addr_width; diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5d4986e5f5fa..73654e33f13b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, list_del(&sgreq->node); if (sgreq->last_sg) { - dma_desc->dma_status = DMA_SUCCESS; + dma_desc->dma_status = DMA_COMPLETE; dma_cookie_complete(&dma_desc->txd); if 
(!dma_desc->cb_count) list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); @@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, unsigned int residual; ret = dma_cookie_status(dc, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&tdc->lock, flags); @@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( return &dma_desc->txd; } -struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( +static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags, void *context) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 28af214fce04..4506a7b4f972 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) return done; } -static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, - bool single) -{ - dma_addr_t addr; - int len; - - addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | - dma_desc[4]; - - len = (dma_desc[3] << 8) | dma_desc[2]; - - if (single) - dma_unmap_single(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); - else - dma_unmap_page(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); -} - -static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) -{ - struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, - struct timb_dma_chan, chan); - u8 *descs; - - for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { - __td_unmap_desc(td_chan, descs, single); - if (descs[0] & 0x02) - break; - } -} - static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) { @@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) list_move(&td_desc->desc_node, &td_chan->free_list); - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) - __td_unmap_descs(td_desc, - txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); - + dma_descriptor_unmap(txd); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 71e8e775189e..bae6c29f5502 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, list_splice_init(&desc->tx_list, &dc->free_list); list_move(&desc->desc_node, &dc->free_list); - if (!ds) { - dma_addr_t dmaaddr; - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - dmaaddr = is_dmac64(dc) ? - desc->hwdesc.DAR : desc->hwdesc32.DAR; - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dmaaddr = is_dmac64(dc) ? 
- desc->hwdesc.SAR : desc->hwdesc32.SAR; - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - } - } - + dma_descriptor_unmap(txd); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here @@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) - return DMA_SUCCESS; + if (ret == DMA_COMPLETE) + return DMA_COMPLETE; spin_lock_bh(&dc->lock); txx9dmac_scan_descriptors(dc); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 8472405c5586..d7f1b57bd3be 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, u32 tad_offset; u32 rir_way; u32 mb, kb; - u64 ch_addr, offset, limit, prv = 0; + u64 ch_addr, offset, limit = 0, prv = 0; /* diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index 3c55ec856e39..a287cece0593 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -1082,7 +1082,7 @@ static void arizona_micd_set_level(struct arizona *arizona, int index, static int arizona_extcon_probe(struct platform_device *pdev) { struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); - struct arizona_pdata *pdata; + struct arizona_pdata *pdata = &arizona->pdata; struct arizona_extcon_info *info; unsigned int val; int jack_irq_fall, jack_irq_rise; @@ -1091,8 +1091,6 @@ static int arizona_extcon_probe(struct platform_device *pdev) if (!arizona->dapm || !arizona->dapm->card) return -EPROBE_DEFER; - pdata = dev_get_platdata(arizona->dev); - info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) { dev_err(&pdev->dev, "Failed to allocate memory\n"); diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c index 15443d3b6be1..76322330cbd7 100644 --- a/drivers/extcon/extcon-class.c +++ b/drivers/extcon/extcon-class.c @@ -792,6 +792,8 @@ void extcon_dev_unregister(struct extcon_dev *edev) return; } + device_unregister(&edev->dev); + if (edev->mutually_exclusive && edev->max_supported) { for (index = 0; edev->mutually_exclusive[index]; index++) @@ -812,7 +814,6 @@ void extcon_dev_unregister(struct extcon_dev *edev) if (switch_class) class_compat_remove_link(switch_class, &edev->dev, NULL); #endif - device_unregister(&edev->dev); put_device(&edev->dev); } EXPORT_SYMBOL_GPL(extcon_dev_unregister); diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 281029daf98c..b0bb056458a3 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1623,6 +1623,7 @@ static struct scsi_host_template scsi_driver_template = { .cmd_per_lun = 1, .can_queue = 1, .sdev_attrs = sbp2_scsi_sysfs_attrs, + .no_write_same = 1, }; MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 5002d50e3781..743fd426f21b 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -18,14 +18,12 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644); static int efi_pstore_open(struct pstore_info *psi) { - efivar_entry_iter_begin(); psi->data = NULL; return 0; } static int efi_pstore_close(struct pstore_info *psi) { - efivar_entry_iter_end(); psi->data = NULL; return 
0; } @@ -39,6 +37,12 @@ struct pstore_read_data { char **buf; }; +static inline u64 generic_id(unsigned long timestamp, + unsigned int part, int count) +{ + return (timestamp * 100 + part) * 1000 + count; +} + static int efi_pstore_read_func(struct efivar_entry *entry, void *data) { efi_guid_t vendor = LINUX_EFI_CRASH_GUID; @@ -57,7 +61,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", cb_data->type, &part, &cnt, &time, &data_type) == 5) { - *cb_data->id = part; + *cb_data->id = generic_id(time, part, cnt); *cb_data->count = cnt; cb_data->timespec->tv_sec = time; cb_data->timespec->tv_nsec = 0; @@ -67,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) *cb_data->compressed = false; } else if (sscanf(name, "dump-type%u-%u-%d-%lu", cb_data->type, &part, &cnt, &time) == 4) { - *cb_data->id = part; + *cb_data->id = generic_id(time, part, cnt); *cb_data->count = cnt; cb_data->timespec->tv_sec = time; cb_data->timespec->tv_nsec = 0; @@ -79,7 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) * which doesn't support holding * multiple logs, remains. */ - *cb_data->id = part; + *cb_data->id = generic_id(time, part, 0); *cb_data->count = 0; cb_data->timespec->tv_sec = time; cb_data->timespec->tv_nsec = 0; @@ -91,19 +95,125 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) __efivar_entry_get(entry, &entry->var.Attributes, &entry->var.DataSize, entry->var.Data); size = entry->var.DataSize; + memcpy(*cb_data->buf, entry->var.Data, + (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size)); - *cb_data->buf = kmemdup(entry->var.Data, size, GFP_KERNEL); - if (*cb_data->buf == NULL) - return -ENOMEM; return size; } +/** + * efi_pstore_scan_sysfs_enter + * @entry: scanning entry + * @next: next entry + * @head: list head + */ +static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos, + struct efivar_entry *next, + struct list_head *head) +{ + pos->scanning = true; + if (&next->list != head) + next->scanning = true; +} + +/** + * __efi_pstore_scan_sysfs_exit + * @entry: deleting entry + * @turn_off_scanning: Check if a scanning flag should be turned off + */ +static inline void __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry, + bool turn_off_scanning) +{ + if (entry->deleting) { + list_del(&entry->list); + efivar_entry_iter_end(); + efivar_unregister(entry); + efivar_entry_iter_begin(); + } else if (turn_off_scanning) + entry->scanning = false; +} + +/** + * efi_pstore_scan_sysfs_exit + * @pos: scanning entry + * @next: next entry + * @head: list head + * @stop: a flag checking if scanning will stop + */ +static void efi_pstore_scan_sysfs_exit(struct efivar_entry *pos, + struct efivar_entry *next, + struct list_head *head, bool stop) +{ + __efi_pstore_scan_sysfs_exit(pos, true); + if (stop) + __efi_pstore_scan_sysfs_exit(next, &next->list != head); +} + +/** + * efi_pstore_sysfs_entry_iter + * + * @data: function-specific data to pass to callback + * @pos: entry to begin iterating from + * + * You MUST call efivar_enter_iter_begin() before this function, and + * efivar_entry_iter_end() afterwards. + * + * It is possible to begin iteration from an arbitrary entry within + * the list by passing @pos. @pos is updated on return to point to + * the next entry of the last one passed to efi_pstore_read_func(). + * To begin iterating from the beginning of the list @pos must be %NULL. 
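The iteration contract spelled out above is easy to get wrong, so here is a hypothetical caller sketch (the wrapper name is illustrative only; efi_pstore_read() further below is the real user, which keeps psi->data as the saved @pos between successive pstore reads):

/* Hypothetical wrapper, shown only to illustrate the locking contract. */
static ssize_t efi_pstore_read_next(struct pstore_read_data *cb_data,
				    struct efivar_entry **pos)
{
	ssize_t size;

	/* The caller, not the iterator, holds the efivars iteration lock
	 * around the whole walk; a fresh scan starts with *pos == NULL. */
	efivar_entry_iter_begin();
	size = efi_pstore_sysfs_entry_iter(cb_data, pos);
	efivar_entry_iter_end();

	return size;	/* >0: got data, 0: skip this entry, <0: stop reading */
}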
+ */ +static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos) +{ + struct efivar_entry *entry, *n; + struct list_head *head = &efivar_sysfs_list; + int size = 0; + + if (!*pos) { + list_for_each_entry_safe(entry, n, head, list) { + efi_pstore_scan_sysfs_enter(entry, n, head); + + size = efi_pstore_read_func(entry, data); + efi_pstore_scan_sysfs_exit(entry, n, head, size < 0); + if (size) + break; + } + *pos = n; + return size; + } + + list_for_each_entry_safe_from((*pos), n, head, list) { + efi_pstore_scan_sysfs_enter((*pos), n, head); + + size = efi_pstore_read_func((*pos), data); + efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); + if (size) + break; + } + *pos = n; + return size; +} + +/** + * efi_pstore_read + * + * This function returns a size of NVRAM entry logged via efi_pstore_write(). + * The meaning and behavior of efi_pstore/pstore are as below. + * + * size > 0: Got data of an entry logged via efi_pstore_write() successfully, + * and pstore filesystem will continue reading subsequent entries. + * size == 0: Entry was not logged via efi_pstore_write(), + * and efi_pstore driver will continue reading subsequent entries. + * size < 0: Failed to get data of entry logging via efi_pstore_write(), + * and pstore will stop reading entry. + */ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count, struct timespec *timespec, char **buf, bool *compressed, struct pstore_info *psi) { struct pstore_read_data data; + ssize_t size; data.id = id; data.type = type; @@ -112,8 +222,17 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, data.compressed = compressed; data.buf = buf; - return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data, - (struct efivar_entry **)&psi->data); + *data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); + if (!*data.buf) + return -ENOMEM; + + efivar_entry_iter_begin(); + size = efi_pstore_sysfs_entry_iter(&data, + (struct efivar_entry **)&psi->data); + efivar_entry_iter_end(); + if (size <= 0) + kfree(*data.buf); + return size; } static int efi_pstore_write(enum pstore_type_id type, @@ -184,9 +303,17 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) return 0; } + if (entry->scanning) { + /* + * Skip deletion because this entry will be deleted + * after scanning is completed. 
+ */ + entry->deleting = true; + } else + list_del(&entry->list); + /* found */ __efivar_entry_delete(entry); - list_del(&entry->list); return 1; } @@ -199,14 +326,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, char name[DUMP_NAME_LEN]; efi_char16_t efi_name[DUMP_NAME_LEN]; int found, i; + unsigned int part; - sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count, - time.tv_sec); + do_div(id, 1000); + part = do_div(id, 100); + sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec); for (i = 0; i < DUMP_NAME_LEN; i++) efi_name[i] = name[i]; - edata.id = id; + edata.id = part; edata.type = type; edata.count = count; edata.time = time; @@ -214,10 +343,12 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, efivar_entry_iter_begin(); found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list, &edata, &entry); - efivar_entry_iter_end(); - if (found) + if (found && !entry->scanning) { + efivar_entry_iter_end(); efivar_unregister(entry); + } else + efivar_entry_iter_end(); return 0; } diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 933eb027d527..3dc248239197 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -383,12 +383,16 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, else if (__efivar_entry_delete(entry)) err = -EIO; - efivar_entry_iter_end(); - - if (err) + if (err) { + efivar_entry_iter_end(); return err; + } - efivar_unregister(entry); + if (!entry->scanning) { + efivar_entry_iter_end(); + efivar_unregister(entry); + } else + efivar_entry_iter_end(); /* It's dead Jim.... */ return count; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 391c67b182d9..b22659cccca4 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -683,8 +683,16 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, if (!found) return NULL; - if (remove) - list_del(&entry->list); + if (remove) { + if (entry->scanning) { + /* + * The entry will be deleted + * after scanning is completed. + */ + entry->deleting = true; + } else + list_del(&entry->list); + } return entry; } diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 72c927dc3be1..54c18c220a60 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -158,7 +158,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio) spin_unlock_irqrestore(&kona_gpio->lock, flags); /* return the specified bit status */ - return !!(val & bit); + return !!(val & BIT(bit)); } static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 8847adf392b7..84be70157ad6 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) * NOTE: we assume for now that only irqs in the first gpio_chip * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). 
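The gpio-davinci hunk that follows fixes the bounds check for unbanked GPIOs: the old code compared the GPIO offset against irq_base, which is an interrupt number rather than a count of unbanked lines. A minimal sketch of the corrected mapping, using a stand-in struct since the driver's real controller type is not shown in this diff:

/* Stand-in for the driver's controller state, illustration only. */
struct unbanked_gpio_map {
	int gpio_irq;		/* IRQ of the first unbanked GPIO */
	int gpio_unbanked;	/* number of GPIOs with a direct AINTC IRQ */
};

static int unbanked_gpio_to_irq(const struct unbanked_gpio_map *d,
				unsigned int offset)
{
	if (offset < d->gpio_unbanked)	/* was: offset < d->irq_base */
		return d->gpio_irq + offset;
	return -ENODEV;
}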
*/ - if (offset < d->irq_base) + if (offset < d->gpio_unbanked) return d->gpio_irq + offset; else return -ENODEV; @@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) /* pass "bank 0" GPIO IRQs to AINTC */ chips[0].chip.to_irq = gpio_to_irq_unbanked; + chips[0].gpio_irq = bank_irq; + chips[0].gpio_unbanked = pdata->gpio_unbanked; binten = BIT(0); /* AINTC handles mask/unmask; GPIO handles triggering */ diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index 914e859e3eda..d7d6d72eba33 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -70,10 +70,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio) u32 val; struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + u32 out_mask, out_shadow; - val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); + out_mask = in_be32(mm->regs + GPIO_DIR); - return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); + val = in_be32(mm->regs + GPIO_DAT) & ~out_mask; + out_shadow = mpc8xxx_gc->data & out_mask; + + return (val | out_shadow) & mpc8xxx_gpio2mask(gpio); } static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index f7a0cc4da950..7b37300973db 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -102,7 +102,7 @@ struct msm_gpio_dev { DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO); DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); struct irq_domain *domain; - unsigned int summary_irq; + int summary_irq; void __iomem *msm_tlmm_base; }; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 3c3321f94053..db3129043e63 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -79,7 +79,7 @@ struct mvebu_gpio_chip { spinlock_t lock; void __iomem *membase; void __iomem *percpu_membase; - unsigned int irqbase; + int irqbase; struct irq_domain *domain; int soc_variant; }; diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index f22f7f3e2e53..b4d42112d02d 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -286,11 +286,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) if (!chip->base) return -ENOMEM; - chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR, - irq_base, &pl061_domain_ops, chip); - if (!chip->domain) - return -ENODEV; - spin_lock_init(&chip->lock); chip->gc.request = pl061_gpio_request; @@ -320,6 +315,11 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) irq_set_chained_handler(irq, pl061_irq_handler); irq_set_handler_data(irq, chip); + chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR, + irq_base, &pl061_domain_ops, chip); + if (!chip->domain) + return -ENODEV; + for (i = 0; i < PL061_GPIO_NR; i++) { if (pdata) { if (pdata->directions & (1 << i)) diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index d3f15ae93bd3..fe088a30567a 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -381,7 +381,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) if (!p->irq_domain) { ret = -ENXIO; dev_err(&pdev->dev, "cannot initialize irq domain\n"); - goto err1; + goto err0; } if (devm_request_irq(&pdev->dev, irq->start, diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index 0502b9a041a5..da071ddbad99 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ 
-132,6 +132,7 @@ static int tb10x_gpio_direction_out(struct gpio_chip *chip, int mask = BIT(offset); int val = TB10X_GPIO_DIR_OUT << offset; + tb10x_gpio_set(chip, offset, value); tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val); return 0; diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index 0c7e891c8651..b97d6a6577b9 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -354,17 +354,18 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value) static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); + int ret = -EINVAL; mutex_lock(&priv->mutex); if (offset < TWL4030_GPIO_MAX) - twl4030_set_gpio_dataout(offset, value); + ret = twl4030_set_gpio_direction(offset, 0); priv->direction |= BIT(offset); mutex_unlock(&priv->mutex); twl_set(chip, offset, value); - return 0; + return ret; } static int twl_to_irq(struct gpio_chip *chip, unsigned offset) @@ -435,7 +436,8 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd) static int gpio_twl4030_remove(struct platform_device *pdev); -static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev) +static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev, + struct twl4030_gpio_platform_data *pdata) { struct twl4030_gpio_platform_data *omap_twl_info; @@ -443,6 +445,9 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev) if (!omap_twl_info) return NULL; + if (pdata) + *omap_twl_info = *pdata; + omap_twl_info->use_leds = of_property_read_bool(dev->of_node, "ti,use-leds"); @@ -500,7 +505,7 @@ no_irqs: mutex_init(&priv->mutex); if (node) - pdata = of_gpio_twl4030(&pdev->dev); + pdata = of_gpio_twl4030(&pdev->dev, pdata); if (pdata == NULL) { dev_err(&pdev->dev, "Platform data is missing\n"); diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c index 1a605f2a0f55..06fb5cf99ded 100644 --- a/drivers/gpio/gpio-ucb1400.c +++ b/drivers/gpio/gpio-ucb1400.c @@ -105,3 +105,4 @@ module_platform_driver(ucb1400_gpio_driver); MODULE_DESCRIPTION("Philips UCB1400 GPIO driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ucb1400_gpio"); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 7dd446150294..85f772c0b26a 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -13,6 +13,8 @@ #include <linux/acpi_gpio.h> #include <linux/idr.h> #include <linux/slab.h> +#include <linux/acpi.h> +#include <linux/gpio/driver.h> #define CREATE_TRACE_POINTS #include <trace/events/gpio.h> @@ -1307,6 +1309,18 @@ struct gpio_chip *gpiochip_find(void *data, } EXPORT_SYMBOL_GPL(gpiochip_find); +static int gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + const char *name = data; + + return !strcmp(chip->label, name); +} + +static struct gpio_chip *find_chip_by_name(const char *name) +{ + return gpiochip_find((void *)name, gpiochip_match_name); +} + #ifdef CONFIG_PINCTRL /** @@ -1340,8 +1354,10 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip, ret = pinctrl_get_group_pins(pctldev, pin_group, &pin_range->range.pins, &pin_range->range.npins); - if (ret < 0) + if (ret < 0) { + kfree(pin_range); return ret; + } pinctrl_add_gpio_range(pctldev, &pin_range->range); @@ -2259,26 +2275,10 @@ void gpiod_add_table(struct gpiod_lookup *table, size_t size) mutex_unlock(&gpio_lookup_lock); } -/* - * Caller must have a acquired gpio_lookup_lock - */ -static struct gpio_chip *find_chip_by_name(const char *name) -{ - struct 
gpio_chip *chip = NULL; - - list_for_each_entry(chip, &gpio_lookup_list, list) { - if (chip->label == NULL) - continue; - if (!strcmp(chip->label, name)) - break; - } - - return chip; -} - #ifdef CONFIG_OF static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, - unsigned int idx, unsigned long *flags) + unsigned int idx, + enum gpio_lookup_flags *flags) { char prop_name[32]; /* 32 is max size of property name */ enum of_gpio_flags of_flags; @@ -2296,20 +2296,22 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, return desc; if (of_flags & OF_GPIO_ACTIVE_LOW) - *flags |= GPIOF_ACTIVE_LOW; + *flags |= GPIO_ACTIVE_LOW; return desc; } #else static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, - unsigned int idx, unsigned long *flags) + unsigned int idx, + enum gpio_lookup_flags *flags) { return ERR_PTR(-ENODEV); } #endif static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, - unsigned int idx, unsigned long *flags) + unsigned int idx, + enum gpio_lookup_flags *flags) { struct acpi_gpio_info info; struct gpio_desc *desc; @@ -2319,13 +2321,14 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, return desc; if (info.gpioint && info.active_low) - *flags |= GPIOF_ACTIVE_LOW; + *flags |= GPIO_ACTIVE_LOW; return desc; } static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, - unsigned int idx, unsigned long *flags) + unsigned int idx, + enum gpio_lookup_flags *flags) { const char *dev_id = dev ? dev_name(dev) : NULL; struct gpio_desc *desc = ERR_PTR(-ENODEV); @@ -2365,7 +2368,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, continue; } - if (chip->ngpio >= p->chip_hwnum) { + if (chip->ngpio <= p->chip_hwnum) { dev_warn(dev, "GPIO chip %s has %d GPIOs\n", chip->label, chip->ngpio); continue; @@ -2415,9 +2418,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx) { - struct gpio_desc *desc; + struct gpio_desc *desc = NULL; int status; - unsigned long flags = 0; + enum gpio_lookup_flags flags = 0; dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); @@ -2428,13 +2431,23 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) { dev_dbg(dev, "using ACPI for GPIO lookup\n"); desc = acpi_find_gpio(dev, con_id, idx, &flags); - } else { + } + + /* + * Either we are not using DT or ACPI, or their lookup did not return + * a result. In that case, use platform lookup as a fallback. 
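The fallback rule described above (and implemented just below) is worth restating: the platform lookup-table result only replaces the DT/ACPI result when it actually succeeded, or when no DT/ACPI lookup was attempted at all, so a specific error such as -EPROBE_DEFER is not clobbered. A hypothetical helper expressing the same rule:

/* Sketch of the precedence rule: 'primary' is the DT/ACPI result
 * (NULL when neither was tried), 'fallback' the lookup-table result. */
static struct gpio_desc *gpiod_pick_result(struct gpio_desc *primary,
					   struct gpio_desc *fallback)
{
	if (!IS_ERR(fallback) || !primary)
		return fallback;	/* fallback succeeded, or nothing to preserve */
	return primary;			/* keep the original error code */
}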
+ */ + if (!desc || IS_ERR(desc)) { + struct gpio_desc *pdesc; dev_dbg(dev, "using lookup tables for GPIO lookup"); - desc = gpiod_find(dev, con_id, idx, &flags); + pdesc = gpiod_find(dev, con_id, idx, &flags); + /* If used as fallback, do not replace the previous error */ + if (!IS_ERR(pdesc) || !desc) + desc = pdesc; } if (IS_ERR(desc)) { - dev_warn(dev, "lookup for GPIO %s failed\n", con_id); + dev_dbg(dev, "lookup for GPIO %s failed\n", con_id); return desc; } @@ -2443,8 +2456,12 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, if (status < 0) return ERR_PTR(status); - if (flags & GPIOF_ACTIVE_LOW) + if (flags & GPIO_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); + if (flags & GPIO_OPEN_DRAIN) + set_bit(FLAG_OPEN_DRAIN, &desc->flags); + if (flags & GPIO_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); return desc; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index fb7cf0e796f6..0a1e4a5f4234 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2674,7 +2674,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, int modes = 0; u8 cea_mode; - if (video_db == NULL || video_index > video_len) + if (video_db == NULL || video_index >= video_len) return 0; /* CEA modes are numbered 1..127 */ @@ -2701,7 +2701,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, if (structure & (1 << 8)) { newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); if (newmode) { - newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; + newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; drm_mode_probed_add(connector, newmode); modes++; } diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 1a35ea53106b..c22c3097c3e8 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) } EXPORT_SYMBOL(drm_sysfs_hotplug_event); +static void drm_sysfs_release(struct device *dev) +{ + kfree(dev); +} + /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver * @dev: DRM device to be added @@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event); int drm_sysfs_device_add(struct drm_minor *minor) { char *minor_str; + int r; if (minor->type == DRM_MINOR_CONTROL) minor_str = "controlD%d"; @@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor) else minor_str = "card%d"; - minor->kdev = device_create(drm_class, minor->dev->dev, - MKDEV(DRM_MAJOR, minor->index), - minor, minor_str, minor->index); - if (IS_ERR(minor->kdev)) { - DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev)); - return PTR_ERR(minor->kdev); + minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL); + if (!minor->kdev) { + r = -ENOMEM; + goto error; } + + device_initialize(minor->kdev); + minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index); + minor->kdev->class = drm_class; + minor->kdev->type = &drm_sysfs_device_minor; + minor->kdev->parent = minor->dev->dev; + minor->kdev->release = drm_sysfs_release; + dev_set_drvdata(minor->kdev, minor); + + r = dev_set_name(minor->kdev, minor_str, minor->index); + if (r < 0) + goto error; + + r = device_add(minor->kdev); + if (r < 0) + goto error; + return 0; + +error: + DRM_ERROR("device create failed %d\n", r); + put_device(minor->kdev); + return r; } /** @@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor) void drm_sysfs_device_remove(struct drm_minor *minor) { if (minor->kdev) - 
device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index)); + device_unregister(minor->kdev); minor->kdev = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index b676006a95a0..22b8f5eced80 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -173,28 +173,37 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) static void exynos_drm_preclose(struct drm_device *dev, struct drm_file *file) { + exynos_drm_subdrv_close(dev, file); +} + +static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) +{ struct exynos_drm_private *private = dev->dev_private; - struct drm_pending_vblank_event *e, *t; + struct drm_pending_vblank_event *v, *vt; + struct drm_pending_event *e, *et; unsigned long flags; - /* release events of current file */ + if (!file->driver_priv) + return; + + /* Release all events not unhandled by page flip handler. */ spin_lock_irqsave(&dev->event_lock, flags); - list_for_each_entry_safe(e, t, &private->pageflip_event_list, + list_for_each_entry_safe(v, vt, &private->pageflip_event_list, base.link) { - if (e->base.file_priv == file) { - list_del(&e->base.link); - e->base.destroy(&e->base); + if (v->base.file_priv == file) { + list_del(&v->base.link); + drm_vblank_put(dev, v->pipe); + v->base.destroy(&v->base); } } - spin_unlock_irqrestore(&dev->event_lock, flags); - exynos_drm_subdrv_close(dev, file); -} + /* Release all events handled by page flip handler but not freed. */ + list_for_each_entry_safe(e, et, &file->event_list, link) { + list_del(&e->link); + e->destroy(e); + } + spin_unlock_irqrestore(&dev->event_lock, flags); -static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) -{ - if (!file->driver_priv) - return; kfree(file->driver_priv); file->driver_priv = NULL; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 23da72b5eae9..a61878bf5dcd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -31,7 +31,7 @@ #include "exynos_drm_iommu.h" /* - * FIMD is stand for Fully Interactive Mobile Display and + * FIMD stands for Fully Interactive Mobile Display and * as a display controller, it transfers contents drawn on memory * to a LCD Panel through Display Interfaces such as RGB or * CPU Interface. diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3271fd4b1724..7bccedca487a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -383,6 +383,8 @@ out: g2d_userptr->npages, g2d_userptr->vma); + exynos_gem_put_vma(g2d_userptr->vma); + if (!g2d_userptr->out_of_list) list_del_init(&g2d_userptr->list); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 989be12cdd6e..2e367a1c6a64 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev) * Disable CRTCs directly since we want to preserve sw state * for _thaw. 
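The hunk below wraps the CRTC-disable loop in dev->mode_config.mutex, presumably because the ->crtc_disable() hooks touch shared modeset state and expect the usual locking even on the freeze path. Restated as a helper, purely for illustration:

/* Illustrative helper mirroring the locked loop added below. */
static void i915_disable_crtcs_for_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		dev_priv->display.crtc_disable(crtc);
	mutex_unlock(&dev->mode_config.mutex);
}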
*/ + mutex_lock(&dev->mode_config.mutex); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) dev_priv->display.crtc_disable(crtc); + mutex_unlock(&dev->mode_config.mutex); intel_modeset_suspend_hw(dev); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8600c315b4c4..ccdbecca070d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1816,6 +1816,7 @@ struct drm_i915_file_private { #define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) +#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 12bbd5eac70d..621c7c67a643 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev) if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); - if (IS_HSW_GT3(dev)) - I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); - else - I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED); + if (IS_HASWELL(dev)) + I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ? + LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); if (HAS_PCH_NOP(dev)) { u32 temp = I915_READ(GEN7_MSG_CTL); diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 7d5752fda5f1..9bb533e0d762 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) ret = i915_gem_object_get_pages(obj); if (ret) - goto error; + goto err; + + i915_gem_object_pin_pages(obj); ret = -ENOMEM; pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); if (pages == NULL) - goto error; + goto err_unpin; i = 0; for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) @@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) drm_free_large(pages); if (!obj->dma_buf_vmapping) - goto error; + goto err_unpin; obj->vmapping_count = 1; - i915_gem_object_pin_pages(obj); out_unlock: mutex_unlock(&dev->struct_mutex); return obj->dma_buf_vmapping; -error: +err_unpin: + i915_gem_object_unpin_pages(obj); +err: mutex_unlock(&dev->struct_mutex); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 885d595e0e02..b7e787fb4649 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -33,6 +33,9 @@ #include "intel_drv.h" #include <linux/dma_remapping.h> +#define __EXEC_OBJECT_HAS_PIN (1<<31) +#define __EXEC_OBJECT_HAS_FENCE (1<<30) + struct eb_vmas { struct list_head vmas; int and; @@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) } } -static void eb_destroy(struct eb_vmas *eb) { +static void +i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) +{ + struct drm_i915_gem_exec_object2 *entry; + struct drm_i915_gem_object *obj = vma->obj; + + if (!drm_mm_node_allocated(&vma->node)) + return; + + entry = vma->exec_entry; + + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) + i915_gem_object_unpin_fence(obj); + + if (entry->flags & __EXEC_OBJECT_HAS_PIN) + i915_gem_object_unpin(obj); + + entry->flags &= 
~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); +} + +static void eb_destroy(struct eb_vmas *eb) +{ while (!list_empty(&eb->vmas)) { struct i915_vma *vma; @@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) { struct i915_vma, exec_list); list_del_init(&vma->exec_list); + i915_gem_execbuffer_unreserve_vma(vma); drm_gem_object_unreference(&vma->obj->base); } kfree(eb); @@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb, return ret; } -#define __EXEC_OBJECT_HAS_PIN (1<<31) -#define __EXEC_OBJECT_HAS_FENCE (1<<30) - static int need_reloc_mappable(struct i915_vma *vma) { @@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, return 0; } -static void -i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) -{ - struct drm_i915_gem_exec_object2 *entry; - struct drm_i915_gem_object *obj = vma->obj; - - if (!drm_mm_node_allocated(&vma->node)) - return; - - entry = vma->exec_entry; - - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) - i915_gem_object_unpin_fence(obj); - - if (entry->flags & __EXEC_OBJECT_HAS_PIN) - i915_gem_object_unpin(obj); - - entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); -} - static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct list_head *vmas, @@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, goto err; } -err: /* Decrement pin count for bound objects */ - list_for_each_entry(vma, vmas, exec_list) - i915_gem_execbuffer_unreserve_vma(vma); - +err: if (ret != -ENOSPC || retry++) return ret; + /* Decrement pin count for bound objects */ + list_for_each_entry(vma, vmas, exec_list) + i915_gem_execbuffer_unreserve_vma(vma); + ret = i915_gem_evict_vm(vm, true); if (ret) return ret; @@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, while (!list_empty(&eb->vmas)) { vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); list_del_init(&vma->exec_list); + i915_gem_execbuffer_unreserve_vma(vma); drm_gem_object_unreference(&vma->obj->base); } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3620a1b0a73c..38cb8d44a013 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) +#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) #define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) #define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) @@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, case I915_CACHE_NONE: break; case I915_CACHE_WT: - pte |= HSW_WT_ELLC_LLC_AGE0; + pte |= HSW_WT_ELLC_LLC_AGE3; break; default: - pte |= HSW_WB_ELLC_LLC_AGE0; + pte |= HSW_WB_ELLC_LLC_AGE3; break; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f9eafb6ed523..ee2742122a02 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -235,6 +235,7 @@ */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) +#define MI_SRM_LRM_GLOBAL_GTT (1<<22) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ #define MI_FLUSH_DW_STORE_INDEX (1<<21) #define MI_INVALIDATE_TLB (1<<18) diff 
--git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 43959edd4291..dfff0907f70e 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -196,7 +196,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) acpi_handle dhandle; int ret; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 6dd622d733b9..e4fba39631a5 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) /* Default to using SSC */ dev_priv->vbt.lvds_use_ssc = 1; - dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); + /* + * Core/SandyBridge/IvyBridge use alternative (120MHz) reference + * clock for LVDS. + */ + dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, + !HAS_PCH_SPLIT(dev)); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); for (port = PORT_A; port < I915_MAX_PORTS; port++) { diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 1591576a6101..526c8ded16b0 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ddi_translations = ddi_translations_dp; break; case PORT_D: - if (intel_dpd_is_edp(dev)) + if (intel_dp_is_edp(dev, PORT_D)) ddi_translations = ddi_translations_edp; else ddi_translations = ddi_translations_dp; @@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) if (wait) intel_wait_ddi_buf_idle(dev_priv, port); - if (type == INTEL_OUTPUT_EDP) { + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); ironlake_edp_panel_vdd_on(intel_dp); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); ironlake_edp_panel_off(intel_dp); } @@ -1406,6 +1407,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder, default: break; } + + if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. 
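In effect the code added below clamps the VBT's eDP limit upward to whatever the firmware actually programmed, on the theory that a panel the BIOS already lit at a given depth can clearly handle it. The core of the rule, condensed:

/* Condensed form of the override (the real code also checks that this is
 * an eDP encoder and that the VBT provided a non-zero edp_bpp). */
if (pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp)
	dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;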
+ */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); + dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + } } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3cddd508d110..080f6fd4e839 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc) uint16_t postoff = 0; if (intel_crtc->config.limited_color_range) - postoff = (16 * (1 << 13) / 255) & 0x1fff; + postoff = (16 * (1 << 12) / 255) & 0x1fff; I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); @@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) /* Make sure we're not on PC8 state before disabling PC8, otherwise * we'll hang the machine! */ - dev_priv->uncore.funcs.force_wake_get(dev_priv); + gen6_gt_force_wake_get(dev_priv); if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; @@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) DRM_ERROR("Switching back to LCPLL failed\n"); } - dev_priv->uncore.funcs.force_wake_put(dev_priv); + gen6_gt_force_wake_put(dev_priv); } void hsw_enable_pc8_work(struct work_struct *__work) @@ -6518,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) void hsw_enable_package_c8(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + mutex_lock(&dev_priv->pc8.lock); __hsw_enable_package_c8(dev_priv); mutex_unlock(&dev_priv->pc8.lock); @@ -6525,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv) void hsw_disable_package_c8(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + mutex_lock(&dev_priv->pc8.lock); __hsw_disable_package_c8(dev_priv); mutex_unlock(&dev_priv->pc8.lock); @@ -6562,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; bool allow; + if (!HAS_PC8(dev_priv->dev)) + return; + if (!i915_enable_pc8) return; @@ -6585,18 +6594,28 @@ done: static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + + mutex_lock(&dev_priv->pc8.lock); if (!dev_priv->pc8.gpu_idle) { dev_priv->pc8.gpu_idle = true; - hsw_enable_package_c8(dev_priv); + __hsw_enable_package_c8(dev_priv); } + mutex_unlock(&dev_priv->pc8.lock); } static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + + mutex_lock(&dev_priv->pc8.lock); if (dev_priv->pc8.gpu_idle) { dev_priv->pc8.gpu_idle = false; - hsw_disable_package_c8(dev_priv); + __hsw_disable_package_c8(dev_priv); } + mutex_unlock(&dev_priv->pc8.lock); } #define for_each_power_domain(domain, mask) \ @@ -7184,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ + POSTING_READ(CURCNTR(pipe)); I915_WRITE(CURBASE(pipe), base); + POSTING_READ(CURBASE(pipe)); } static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) @@ -7213,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ + POSTING_READ(CURCNTR_IVB(pipe)); I915_WRITE(CURBASE_IVB(pipe), base); + 
POSTING_READ(CURBASE_IVB(pipe)); } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ @@ -8331,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEC_PRI_FLIP_DONE)); - intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); + intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | + MI_SRM_LRM_GLOBAL_GTT); intel_ring_emit(ring, DERRMR); intel_ring_emit(ring, ring->scratch.gtt_offset + 256); } @@ -9248,8 +9272,7 @@ check_crtc_state(struct drm_device *dev) enum pipe pipe; if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config && - encoder->get_hw_state(encoder, &pipe)) + if (encoder->get_hw_state(encoder, &pipe)) encoder->get_config(encoder, &pipe_config); } @@ -10027,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_ddi_init(dev, PORT_D); } else if (HAS_PCH_SPLIT(dev)) { int found; - dpd_is_edp = intel_dpd_is_edp(dev); + dpd_is_edp = intel_dp_is_edp(dev, PORT_D); if (has_edp_a(dev)) intel_dp_init(dev, DP_A, PORT_A); @@ -10064,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, PORT_C); if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, - PORT_C); + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); } intel_dsi_init(dev); @@ -10909,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; - if (encoder->get_config) - encoder->get_config(encoder, &crtc->config); + encoder->get_config(encoder, &crtc->config); } else { encoder->base.crtc = NULL; } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index eb8139da9763..30c627c7b7ba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1774,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); ironlake_edp_panel_off(intel_dp); /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. 
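The DPMS change above is the functional part of this hunk: the disable path was telling the sink to wake up (DRM_MODE_DPMS_ON) while everything else was being powered down, and now puts it to sleep instead. A commented sketch of the power-down order, simplified from intel_disable_dp():

/* Simplified sketch of the eDP power-down sequence shown above. */
static void edp_power_down(struct intel_dp *intel_dp)
{
	ironlake_edp_panel_vdd_on(intel_dp);	/* keep VDD up while we poke the panel */
	ironlake_edp_backlight_off(intel_dp);	/* backlight goes first */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); /* sink to sleep, not awake */
	ironlake_edp_panel_off(intel_dp);	/* then drop panel power */
	/* the CPU eDP port itself is only disabled after the pipe/plane,
	 * per the comment above */
}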
*/ @@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) } /* check the VBT to see whether the eDP is on DP-D port */ -bool intel_dpd_is_edp(struct drm_device *dev) +bool intel_dp_is_edp(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; union child_device_config *p_child; int i; + static const short port_mapping[] = { + [PORT_B] = PORT_IDPB, + [PORT_C] = PORT_IDPC, + [PORT_D] = PORT_IDPD, + }; + + if (port == PORT_A) + return true; if (!dev_priv->vbt.child_dev_num) return false; @@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev) for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { p_child = dev_priv->vbt.child_dev + i; - if (p_child->common.dvo_port == PORT_IDPD && + if (p_child->common.dvo_port == port_mapping[port] && (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) return true; @@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->DP = I915_READ(intel_dp->output_reg); intel_dp->attached_connector = intel_connector; - type = DRM_MODE_CONNECTOR_DisplayPort; - /* - * FIXME : We need to initialize built-in panels before external panels. - * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup - */ - switch (port) { - case PORT_A: + if (intel_dp_is_edp(dev, port)) type = DRM_MODE_CONNECTOR_eDP; - break; - case PORT_C: - if (IS_VALLEYVIEW(dev)) - type = DRM_MODE_CONNECTOR_eDP; - break; - case PORT_D: - if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev)) - type = DRM_MODE_CONNECTOR_eDP; - break; - default: /* silence GCC warning */ - break; - } + else + type = DRM_MODE_CONNECTOR_DisplayPort; /* * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1e49aa8f5377..a18e88b3e425 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder); void intel_dp_check_link_status(struct intel_dp *intel_dp); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); -bool intel_dpd_is_edp(struct drm_device *dev); +bool intel_dp_is_edp(struct drm_device *dev, enum port port); void ironlake_edp_backlight_on(struct intel_dp *intel_dp); void ironlake_edp_backlight_off(struct intel_dp *intel_dp); void ironlake_edp_panel_on(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 1b2f41c3f191..6d69a9bad865 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -638,7 +638,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp; int i = 0; - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a07d7c9cafc..6e0d5e075b15 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; clock = adjusted_mode->crtc_clock; - htotal = adjusted_mode->htotal; + htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; pixel_size = crtc->fb->bits_per_pixel / 8; @@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct 
drm_device *dev, crtc = intel_get_crtc_for_plane(dev, plane); adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; clock = adjusted_mode->crtc_clock; - htotal = adjusted_mode->htotal; + htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; pixel_size = crtc->fb->bits_per_pixel / 8; @@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; int clock = adjusted_mode->crtc_clock; - int htotal = adjusted_mode->htotal; + int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; int pixel_size = crtc->fb->bits_per_pixel / 8; unsigned long line_time_us; @@ -1624,8 +1624,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config.adjusted_mode; int clock = adjusted_mode->crtc_clock; - int htotal = adjusted_mode->htotal; - int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; + int htotal = adjusted_mode->crtc_htotal; + int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; int pixel_size = enabled->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; @@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, crtc = intel_get_crtc_for_plane(dev, plane); adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; clock = adjusted_mode->crtc_clock; - htotal = adjusted_mode->htotal; + htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; pixel_size = crtc->fb->bits_per_pixel / 8; @@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) /* The WM are computed with base on how long it takes to fill a single * row at the given clock rate, multiplied by 8. 
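A worked example makes the linetime units clearer (the mode numbers are assumed, not taken from the patch): a 1920x1080@60 timing typically has crtc_htotal = 2200 and crtc_clock = 148500 kHz.

/*
 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * i.e. one scanline takes about 14.8 us, and the value is that line time
 * expressed in units of 1/8 us (hence the "multiplied by 8" above).
 */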
* */ - linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock); - ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, + linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, + mode->crtc_clock); + ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, intel_ddi_get_cdclk_freq(dev_priv)); return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | @@ -3888,7 +3889,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 18c406246a2d..22cf0f4ba248 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector, } +static void +intel_tv_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; +} + static bool intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) @@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_ENCODER_TVDAC); intel_encoder->compute_config = intel_tv_compute_config; + intel_encoder->get_config = intel_tv_get_config; intel_encoder->mode_set = intel_tv_mode_set; intel_encoder->enable = intel_enable_tv; intel_encoder->disable = intel_disable_tv; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f9883ceff946..0b02078a0b84 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -217,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static void intel_uncore_forcewake_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + vlv_force_wake_reset(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + __gen6_gt_force_wake_reset(dev_priv); + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + __gen6_gt_force_wake_mt_reset(dev_priv); + } +} + void intel_uncore_early_sanitize(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -234,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev) dev_priv->ellc_size = 128; DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); } -} -static void intel_uncore_forcewake_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } + intel_uncore_forcewake_reset(dev); } void intel_uncore_sanitize(struct drm_device *dev) diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index edcf801613e6..b3fa1ba191b7 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -59,6 +59,7 @@ nouveau-y += core/subdev/clock/nv40.o nouveau-y += core/subdev/clock/nv50.o nouveau-y += core/subdev/clock/nv84.o nouveau-y += core/subdev/clock/nva3.o +nouveau-y += core/subdev/clock/nvaa.o nouveau-y += core/subdev/clock/nvc0.o nouveau-y += core/subdev/clock/nve0.o nouveau-y += core/subdev/clock/pllnv04.o diff --git 
a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c index db139827047c..db3fc7be856a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c @@ -283,7 +283,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; - device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; @@ -311,7 +311,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; - device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index 5f555788121c..e6352bd5b4ff 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -33,6 +33,7 @@ #include <engine/dmaobj.h> #include <engine/fifo.h> +#include "nv04.h" #include "nv50.h" /******************************************************************************* @@ -460,6 +461,8 @@ nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_subdev(priv)->intr = nv04_fifo_intr; nv_engine(priv)->cclass = &nv50_fifo_cclass; nv_engine(priv)->sclass = nv50_fifo_sclass; + priv->base.pause = nv04_fifo_pause; + priv->base.start = nv04_fifo_start; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 0908dc834c84..fe0f41e65d9b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -35,6 +35,7 @@ #include <engine/dmaobj.h> #include <engine/fifo.h> +#include "nv04.h" #include "nv50.h" /******************************************************************************* @@ -432,6 +433,8 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_subdev(priv)->intr = nv04_fifo_intr; nv_engine(priv)->cclass = &nv84_fifo_cclass; nv_engine(priv)->sclass = nv84_fifo_sclass; + priv->base.pause = nv04_fifo_pause; + priv->base.start = nv04_fifo_start; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c index b574dd4bb828..5ce686ee729e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c @@ -176,7 +176,7 @@ nv50_software_context_ctor(struct nouveau_object *parent, if (ret) return ret; - chan->vblank.nr_event = pdisp->vblank->index_nr; + chan->vblank.nr_event = pdisp ? 
pdisp->vblank->index_nr : 0; chan->vblank.event = kzalloc(chan->vblank.nr_event * sizeof(*chan->vblank.event), GFP_KERNEL); if (!chan->vblank.event) diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index e2675bc0edba..8f4ced75444a 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h @@ -14,6 +14,9 @@ enum nv_clk_src { nv_clk_src_hclk, nv_clk_src_hclkm3, nv_clk_src_hclkm3d2, + nv_clk_src_hclkm2d3, /* NVAA */ + nv_clk_src_hclkm4, /* NVAA */ + nv_clk_src_cclk, /* NVAA */ nv_clk_src_host, @@ -127,6 +130,7 @@ extern struct nouveau_oclass nv04_clock_oclass; extern struct nouveau_oclass nv40_clock_oclass; extern struct nouveau_oclass *nv50_clock_oclass; extern struct nouveau_oclass *nv84_clock_oclass; +extern struct nouveau_oclass *nvaa_clock_oclass; extern struct nouveau_oclass nva3_clock_oclass; extern struct nouveau_oclass nvc0_clock_oclass; extern struct nouveau_oclass nve0_clock_oclass; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c index da50c1b12928..30c1f3a4158e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c @@ -69,6 +69,11 @@ nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1, return 0; } +static struct nouveau_clocks +nv04_domain[] = { + { nv_clk_src_max } +}; + static int nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -77,7 +82,7 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv04_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv); + ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c new file mode 100644 index 000000000000..7a723b4f564d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c @@ -0,0 +1,445 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <engine/fifo.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> +#include <subdev/timer.h> +#include <subdev/clock.h> + +#include "pll.h" + +struct nvaa_clock_priv { + struct nouveau_clock base; + enum nv_clk_src csrc, ssrc, vsrc; + u32 cctrl, sctrl; + u32 ccoef, scoef; + u32 cpost, spost; + u32 vdiv; +}; + +static u32 +read_div(struct nouveau_clock *clk) +{ + return nv_rd32(clk, 0x004600); +} + +static u32 +read_pll(struct nouveau_clock *clk, u32 base) +{ + u32 ctrl = nv_rd32(clk, base + 0); + u32 coef = nv_rd32(clk, base + 4); + u32 ref = clk->read(clk, nv_clk_src_href); + u32 post_div = 0; + u32 clock = 0; + int N1, M1; + + switch (base){ + case 0x4020: + post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16); + break; + case 0x4028: + post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16; + break; + default: + break; + } + + N1 = (coef & 0x0000ff00) >> 8; + M1 = (coef & 0x000000ff); + if ((ctrl & 0x80000000) && M1) { + clock = ref * N1 / M1; + clock = clock / post_div; + } + + return clock; +} + +static int +nvaa_clock_read(struct nouveau_clock *clk, enum nv_clk_src src) +{ + struct nvaa_clock_priv *priv = (void *)clk; + u32 mast = nv_rd32(clk, 0x00c054); + u32 P = 0; + + switch (src) { + case nv_clk_src_crystal: + return nv_device(priv)->crystal; + case nv_clk_src_href: + return 100000; /* PCIE reference clock */ + case nv_clk_src_hclkm4: + return clk->read(clk, nv_clk_src_href) * 4; + case nv_clk_src_hclkm2d3: + return clk->read(clk, nv_clk_src_href) * 2 / 3; + case nv_clk_src_host: + switch (mast & 0x000c0000) { + case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3); + case 0x00040000: break; + case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4); + case 0x000c0000: return clk->read(clk, nv_clk_src_cclk); + } + break; + case nv_clk_src_core: + P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16; + + switch (mast & 0x00000003) { + case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P; + case 0x00000001: return 0; + case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P; + case 0x00000003: return read_pll(clk, 0x004028) >> P; + } + break; + case nv_clk_src_cclk: + if ((mast & 0x03000000) != 0x03000000) + return clk->read(clk, nv_clk_src_core); + + if ((mast & 0x00000200) == 0x00000000) + return clk->read(clk, nv_clk_src_core); + + switch (mast & 0x00000c00) { + case 0x00000000: return clk->read(clk, nv_clk_src_href); + case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4); + case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3); + default: return 0; + } + case nv_clk_src_shader: + P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16; + switch (mast & 0x00000030) { + case 0x00000000: + if (mast & 0x00000040) + return clk->read(clk, nv_clk_src_href) >> P; + return clk->read(clk, nv_clk_src_crystal) >> P; + case 0x00000010: break; + case 0x00000020: return read_pll(clk, 0x004028) >> P; + case 0x00000030: return read_pll(clk, 0x004020) >> P; + } + break; + case nv_clk_src_mem: + return 0; + break; + case nv_clk_src_vdec: + P = (read_div(clk) & 0x00000700) >> 8; + + switch (mast & 0x00400000) { + case 0x00400000: + return clk->read(clk, nv_clk_src_core) >> P; + break; + default: + return 500000 >> P; + break; + } + break; + default: + break; + } + + nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast); + return 0; +} + +static u32 +calc_pll(struct nvaa_clock_priv *priv, u32 reg, + u32 clock, int *N, int *M, int *P) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_pll pll; + 
struct nouveau_clock *clk = &priv->base; + int ret; + + ret = nvbios_pll_parse(bios, reg, &pll); + if (ret) + return 0; + + pll.vco2.max_freq = 0; + pll.refclk = clk->read(clk, nv_clk_src_href); + if (!pll.refclk) + return 0; + + return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P); +} + +static inline u32 +calc_P(u32 src, u32 target, int *div) +{ + u32 clk0 = src, clk1 = src; + for (*div = 0; *div <= 7; (*div)++) { + if (clk0 <= target) { + clk1 = clk0 << (*div ? 1 : 0); + break; + } + clk0 >>= 1; + } + + if (target - clk0 <= clk1 - target) + return clk0; + (*div)--; + return clk1; +} + +static int +nvaa_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate) +{ + struct nvaa_clock_priv *priv = (void *)clk; + const int shader = cstate->domain[nv_clk_src_shader]; + const int core = cstate->domain[nv_clk_src_core]; + const int vdec = cstate->domain[nv_clk_src_vdec]; + u32 out = 0, clock = 0; + int N, M, P1, P2 = 0; + int divs = 0; + + /* cclk: find suitable source, disable PLL if we can */ + if (core < clk->read(clk, nv_clk_src_hclkm4)) + out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs); + + /* Calculate clock * 2, so shader clock can use it too */ + clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1); + + if (abs(core - out) <= + abs(core - (clock >> 1))) { + priv->csrc = nv_clk_src_hclkm4; + priv->cctrl = divs << 16; + } else { + /* NVCTRL is actually used _after_ NVPOST, and after what we + * call NVPLL. To make matters worse, NVPOST is an integer + * divider instead of a right-shift number. */ + if(P1 > 2) { + P2 = P1 - 2; + P1 = 2; + } + + priv->csrc = nv_clk_src_core; + priv->ccoef = (N << 8) | M; + + priv->cctrl = (P2 + 1) << 16; + priv->cpost = (1 << P1) << 16; + } + + /* sclk: nvpll + divisor, href or spll */ + out = 0; + if (shader == clk->read(clk, nv_clk_src_href)) { + priv->ssrc = nv_clk_src_href; + } else { + clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1); + if (priv->csrc == nv_clk_src_core) { + out = calc_P((core << 1), shader, &divs); + } + + if (abs(shader - out) <= + abs(shader - clock) && + (divs + P2) <= 7) { + priv->ssrc = nv_clk_src_core; + priv->sctrl = (divs + P2) << 16; + } else { + priv->ssrc = nv_clk_src_shader; + priv->scoef = (N << 8) | M; + priv->sctrl = P1 << 16; + } + } + + /* vclk */ + out = calc_P(core, vdec, &divs); + clock = calc_P(500000, vdec, &P1); + if(abs(vdec - out) <= + abs(vdec - clock)) { + priv->vsrc = nv_clk_src_cclk; + priv->vdiv = divs << 16; + } else { + priv->vsrc = nv_clk_src_vdec; + priv->vdiv = P1 << 16; + } + + /* Print strategy! 
*/ + nv_debug(priv, "nvpll: %08x %08x %08x\n", + priv->ccoef, priv->cpost, priv->cctrl); + nv_debug(priv, " spll: %08x %08x %08x\n", + priv->scoef, priv->spost, priv->sctrl); + nv_debug(priv, " vdiv: %08x\n", priv->vdiv); + if (priv->csrc == nv_clk_src_hclkm4) + nv_debug(priv, "core: hrefm4\n"); + else + nv_debug(priv, "core: nvpll\n"); + + if (priv->ssrc == nv_clk_src_hclkm4) + nv_debug(priv, "shader: hrefm4\n"); + else if (priv->ssrc == nv_clk_src_core) + nv_debug(priv, "shader: nvpll\n"); + else + nv_debug(priv, "shader: spll\n"); + + if (priv->vsrc == nv_clk_src_hclkm4) + nv_debug(priv, "vdec: 500MHz\n"); + else + nv_debug(priv, "vdec: core\n"); + + return 0; +} + +static int +nvaa_clock_prog(struct nouveau_clock *clk) +{ + struct nvaa_clock_priv *priv = (void *)clk; + struct nouveau_fifo *pfifo = nouveau_fifo(clk); + unsigned long flags; + u32 pllmask = 0, mast, ptherm_gate; + int ret = -EBUSY; + + /* halt and idle execution engines */ + ptherm_gate = nv_mask(clk, 0x020060, 0x00070000, 0x00000000); + nv_mask(clk, 0x002504, 0x00000001, 0x00000001); + /* Wait until the interrupt handler is finished */ + if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000)) + goto resume; + + if (pfifo) + pfifo->pause(pfifo, &flags); + + if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010)) + goto resume; + if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f)) + goto resume; + + /* First switch to safe clocks: href */ + mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640); + mast &= ~0x00400e73; + mast |= 0x03000000; + + switch (priv->csrc) { + case nv_clk_src_hclkm4: + nv_mask(clk, 0x4028, 0x00070000, priv->cctrl); + mast |= 0x00000002; + break; + case nv_clk_src_core: + nv_wr32(clk, 0x402c, priv->ccoef); + nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl); + nv_wr32(clk, 0x4040, priv->cpost); + pllmask |= (0x3 << 8); + mast |= 0x00000003; + break; + default: + nv_warn(priv,"Reclocking failed: unknown core clock\n"); + goto resume; + } + + switch (priv->ssrc) { + case nv_clk_src_href: + nv_mask(clk, 0x4020, 0x00070000, 0x00000000); + /* mast |= 0x00000000; */ + break; + case nv_clk_src_core: + nv_mask(clk, 0x4020, 0x00070000, priv->sctrl); + mast |= 0x00000020; + break; + case nv_clk_src_shader: + nv_wr32(clk, 0x4024, priv->scoef); + nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl); + nv_wr32(clk, 0x4070, priv->spost); + pllmask |= (0x3 << 12); + mast |= 0x00000030; + break; + default: + nv_warn(priv,"Reclocking failed: unknown sclk clock\n"); + goto resume; + } + + if (!nv_wait(clk, 0x004080, pllmask, pllmask)) { + nv_warn(priv,"Reclocking failed: unstable PLLs\n"); + goto resume; + } + + switch (priv->vsrc) { + case nv_clk_src_cclk: + mast |= 0x00400000; + default: + nv_wr32(clk, 0x4600, priv->vdiv); + } + + nv_wr32(clk, 0xc054, mast); + ret = 0; + +resume: + if (pfifo) + pfifo->start(pfifo, &flags); + + nv_mask(clk, 0x002504, 0x00000001, 0x00000000); + nv_wr32(clk, 0x020060, ptherm_gate); + + /* Disable some PLLs and dividers when unused */ + if (priv->csrc != nv_clk_src_core) { + nv_wr32(clk, 0x4040, 0x00000000); + nv_mask(clk, 0x4028, 0x80000000, 0x00000000); + } + + if (priv->ssrc != nv_clk_src_shader) { + nv_wr32(clk, 0x4070, 0x00000000); + nv_mask(clk, 0x4020, 0x80000000, 0x00000000); + } + + return ret; +} + +static void +nvaa_clock_tidy(struct nouveau_clock *clk) +{ +} + +static struct nouveau_clocks +nvaa_domains[] = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_href , 0xff }, + { nv_clk_src_core , 0xff, 0, "core", 1000 }, + { nv_clk_src_shader , 0xff, 0, "shader", 1000 }, + { nv_clk_src_vdec , 
0xff, 0, "vdec", 1000 }, + { nv_clk_src_max } +}; + +static int +nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvaa_clock_priv *priv; + int ret; + + ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.read = nvaa_clock_read; + priv->base.calc = nvaa_clock_calc; + priv->base.prog = nvaa_clock_prog; + priv->base.tidy = nvaa_clock_tidy; + return 0; +} + +struct nouveau_oclass * +nvaa_clock_oclass = &(struct nouveau_oclass) { + .handle = NV_SUBDEV(CLOCK, 0xaa), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvaa_clock_ctor, + .dtor = _nouveau_clock_dtor, + .init = _nouveau_clock_init, + .fini = _nouveau_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c index e286e132c7e7..129120473f6c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c @@ -116,7 +116,7 @@ mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) acpi_handle handle; int ret; - handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); + handle = ACPI_HANDLE(&device->pdev->dev); if (!handle) return false; diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 3618ac6b6316..32e7064b819b 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -58,8 +58,8 @@ struct nouveau_plane { }; static uint32_t formats[] = { - DRM_FORMAT_NV12, DRM_FORMAT_UYVY, + DRM_FORMAT_NV12, }; /* Sine can be approximated with @@ -99,13 +99,28 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_bo *cur = nv_plane->cur; bool flip = nv_plane->flip; - int format = ALIGN(src_w * 4, 0x100); int soff = NV_PCRTC0_SIZE * nv_crtc->index; int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index; - int ret; + int format, ret; + + /* Source parameters given in 16.16 fixed point, ignore fractional. */ + src_x >>= 16; + src_y >>= 16; + src_w >>= 16; + src_h >>= 16; + + format = ALIGN(src_w * 4, 0x100); if (format > 0xffff) - return -EINVAL; + return -ERANGE; + + if (dev->chipset >= 0x30) { + if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) + return -ERANGE; + } else { + if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3)) + return -ERANGE; + } ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); if (ret) @@ -113,12 +128,6 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, nv_plane->cur = nv_fb->nvbo; - /* Source parameters given in 16.16 fixed point, ignore fractional. 
*/ - src_x = src_x >> 16; - src_y = src_y >> 16; - src_w = src_w >> 16; - src_h = src_h >> 16; - nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); @@ -245,14 +254,25 @@ nv10_overlay_init(struct drm_device *device) { struct nouveau_device *dev = nouveau_dev(device); struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); + int num_formats = ARRAY_SIZE(formats); int ret; if (!plane) return; + switch (dev->chipset) { + case 0x10: + case 0x11: + case 0x15: + case 0x1a: + case 0x20: + num_formats = 1; + break; + } + ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, &nv10_plane_funcs, - formats, ARRAY_SIZE(formats), false); + formats, num_formats, false); if (ret) goto err; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 07273a2ae62f..95c740454049 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -256,7 +256,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) acpi_handle dhandle; int retval = 0; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -414,7 +414,7 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev) if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) return false; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -448,7 +448,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return NULL; } - handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&dev->pdev->dev); if (!handle) return NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7809d92183c4..29c3efdfc7dd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -608,6 +608,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, fence = nouveau_fence_ref(new_bo->bo.sync_obj); spin_unlock(&new_bo->bo.bdev->fence_lock); ret = nouveau_fence_sync(fence, chan); + nouveau_fence_unref(&fence); if (ret) return ret; @@ -701,7 +702,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) - drm_send_vblank_event(dev, -1, s->event); + drm_send_vblank_event(dev, s->crtc, s->event); list_del(&s->head); if (ps) diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 38a4db5bfe21..4aff04fa483c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -630,7 +630,6 @@ error: hwmon->hwmon = NULL; return ret; #else - hwmon->hwmon = NULL; return 0; #endif } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index f8e66c08b11a..4e384a2f99c3 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = max(start + size, (u32)256); + u32 end = min_t(u32, start + size, 256); u32 i; for (i = start; i < end; i++) { diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 0109a9644cb2..821ab7b9409b 100644 --- 
a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -92,6 +92,7 @@ qxl_release_free(struct qxl_device *qdev, - DRM_FILE_OFFSET); qxl_fence_remove_release(&bo->fence, release->id); qxl_bo_unref(&bo); + kfree(entry); } spin_lock(&qdev->release_idr_lock); idr_remove(&qdev->release_idr, release->id); diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index deaf98cdca3a..f685035dbe39 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -44,7 +44,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); unsigned char *base; - u16 out; + u16 out = cpu_to_le16(0); memset(&args, 0, sizeof(args)); @@ -55,9 +55,14 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); return -EINVAL; } - args.ucRegIndex = buf[0]; - if (num > 1) - memcpy(&out, &buf[1], num - 1); + if (buf == NULL) + args.ucRegIndex = 0; + else + args.ucRegIndex = buf[0]; + if (num) + num--; + if (num) + memcpy(&out, &buf[1], num); args.lpI2CDataOut = cpu_to_le16(out); } else { if (num > ATOM_MAX_HW_I2C_READ) { @@ -94,14 +99,14 @@ int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); struct i2c_msg *p; int i, remaining, current_count, buffer_offset, max_bytes, ret; - u8 buf = 0, flags; + u8 flags; /* check for bus probe */ p = &msgs[0]; if ((num == 1) && (p->len == 0)) { ret = radeon_process_i2c_ch(i2c, p->addr, HW_I2C_WRITE, - &buf, 1); + NULL, 0); if (ret) return ret; else diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ae92aa041c6a..b43a3a3c9067 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev) * cik_mm_rdoorbell - read a doorbell dword * * @rdev: radeon_device pointer - * @offset: byte offset into the aperture + * @index: doorbell index * * Returns the value in the doorbell aperture at the - * requested offset (CIK). + * requested doorbell index (CIK). */ -u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset) +u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index) { - if (offset < rdev->doorbell.size) { - return readl(((void __iomem *)rdev->doorbell.ptr) + offset); + if (index < rdev->doorbell.num_doorbells) { + return readl(rdev->doorbell.ptr + index); } else { - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset); + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); return 0; } } @@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset) * cik_mm_wdoorbell - write a doorbell dword * * @rdev: radeon_device pointer - * @offset: byte offset into the aperture + * @index: doorbell index * @v: value to write * * Writes @v to the doorbell aperture at the - * requested offset (CIK). + * requested doorbell index (CIK). 
*/ -void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v) +void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v) { - if (offset < rdev->doorbell.size) { - writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset); + if (index < rdev->doorbell.num_doorbells) { + writel(v, rdev->doorbell.ptr + index); } else { - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset); + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); } } @@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 4) { @@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 2) { @@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else @@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, 0); } -void cik_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) { +/* TODO: figure out why semaphore cause lockups */ +#if 0 uint64_t addr = semaphore->gpu_addr; unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); radeon_ring_write(ring, addr & 0xffffffff); radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); + + return true; +#else + return false; +#endif } /** @@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; @@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) { rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); - WDOORBELL32(ring->doorbell_offset, ring->wptr); + WDOORBELL32(ring->doorbell_index, ring->wptr); } /** @@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) return r; } - /* doorbell offset */ - rdev->ring[idx].doorbell_offset = - (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0; - /* init the mqd struct */ memset(buf, 0, sizeof(struct bonaire_mqd)); @@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) RREG32(CP_HQD_PQ_DOORBELL_CONTROL); mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK; mqd->queue_state.cp_hqd_pq_doorbell_control |= - DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4); + DOORBELL_OFFSET(rdev->ring[idx].doorbell_index); mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN; mqd->queue_state.cp_hqd_pq_doorbell_control &= ~(DOORBELL_SOURCE | DOORBELL_HIT); @@ -7839,14 +7840,14 @@ int 
cik_init(struct radeon_device *rdev) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); + r = radeon_doorbell_get(rdev, &ring->doorbell_index); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); + r = radeon_doorbell_get(rdev, &ring->doorbell_index); if (r) return r; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 9c9529de20ee..0300727a4f70 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev, * Add a DMA semaphore packet to the ring wait on or signal * other rings (CIK). */ -void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); radeon_ring_write(ring, addr & 0xfffffff8); radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + + return true; } /** @@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 91bb470de0a3..920e1e4a52c5 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev) static int cypress_pcie_performance_request(struct radeon_device *rdev, u8 perf_req, bool advertise) { +#if defined(CONFIG_ACPI) struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); +#endif u32 tmp; udelay(10); diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 009f46e0ce72..de86493cbc44 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -93,11 +93,13 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset = dig->afmt->offset; + u32 offset; - if (!dig->afmt->pin) + if (!dig || !dig->afmt || !dig->afmt->pin) return; + offset = dig->afmt->offset; + WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); } @@ -112,7 +114,7 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, struct radeon_connector *radeon_connector = NULL; u32 tmp = 0, offset; - if (!dig->afmt->pin) + if (!dig || !dig->afmt || !dig->afmt->pin) return; offset = dig->afmt->pin->offset; @@ -156,7 +158,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) u8 *sadb; int sad_count; - if (!dig->afmt->pin) + if (!dig || !dig->afmt || !dig->afmt->pin) return; offset = 
dig->afmt->pin->offset; @@ -217,7 +219,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; - if (!dig->afmt->pin) + if (!dig || !dig->afmt || !dig->afmt->pin) return; offset = dig->afmt->pin->offset; diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 6a0656d00ed0..a37b54436382 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index f26339028154..49c4d48f54d6 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -785,8 +785,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, struct ni_ps *ps = ni_get_ps(rps); struct radeon_clock_and_voltage_limits *max_limits; bool disable_mclk_switching; - u32 mclk, sclk; - u16 vddc, vddci; + u32 mclk; + u16 vddci; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; @@ -839,24 +839,14 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, /* XXX validate the min clocks required for display */ + /* adjust low state */ if (disable_mclk_switching) { - mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; - sclk = ps->performance_levels[0].sclk; - vddc = ps->performance_levels[0].vddc; - vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; - } else { - sclk = ps->performance_levels[0].sclk; - mclk = ps->performance_levels[0].mclk; - vddc = ps->performance_levels[0].vddc; - vddci = ps->performance_levels[0].vddci; + ps->performance_levels[0].mclk = + ps->performance_levels[ps->performance_level_count - 1].mclk; + ps->performance_levels[0].vddci = + ps->performance_levels[ps->performance_level_count - 1].vddci; } - /* adjusted low state */ - ps->performance_levels[0].sclk = sclk; - ps->performance_levels[0].mclk = mclk; - ps->performance_levels[0].vddc = vddc; - ps->performance_levels[0].vddci = vddci; - btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk, &ps->performance_levels[0].sclk, &ps->performance_levels[0].mclk); @@ -868,11 +858,15 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; } + /* adjust remaining states */ if (disable_mclk_switching) { mclk = ps->performance_levels[0].mclk; + vddci = ps->performance_levels[0].vddci; for (i = 1; i < ps->performance_level_count; i++) { if (mclk < ps->performance_levels[i].mclk) mclk = ps->performance_levels[i].mclk; + if (vddci < ps->performance_levels[i].vddci) + vddci = ps->performance_levels[i].vddci; } for (i = 0; i < ps->performance_level_count; i++) { ps->performance_levels[i].mclk = mclk; @@ -3445,9 +3439,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev, static int ni_pcie_performance_request(struct radeon_device *rdev, u8 perf_req, bool advertise) { +#if defined(CONFIG_ACPI) struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); -#if defined(CONFIG_ACPI) if 
((perf_req == PCIE_PERF_REQ_PECI_GEN1) || (perf_req == PCIE_PERF_REQ_PECI_GEN2)) { if (eg_pi->pcie_performance_request_registered == false) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 784983d78158..10abc4d5a6cc 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, RADEON_SW_INT_FIRE); } -void r100_semaphore_ring_emit(struct radeon_device *rdev, +bool r100_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) { /* Unused on older asics, since we don't have semaphores or multiple rings */ BUG(); + return false; } int r100_copy_blit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4e609e8a8d2b..9ad06732a78b 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev, } } -void r600_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); radeon_ring_write(ring, addr & 0xffffffff); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); + + return true; } /** @@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 3b317456512a..7844d15c139f 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev, * Add a DMA semaphore packet to the ring wait on or signal * other rings (r6xx-SI). 
*/ -void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); radeon_ring_write(ring, addr & 0xfffffffc); radeon_ring_write(ring, upper_32_bits(addr) & 0xff); + + return true; } /** @@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 4b89262f3f0e..b7d3ecba43e3 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -304,9 +304,9 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ } - } else if (ASIC_IS_DCE3(rdev)) { + } else { /* according to the reg specs, this should DCE3.2 only, but in - * practice it seems to cover DCE3.0/3.1 as well. + * practice it seems to cover DCE2.0/3.0/3.1 as well. */ if (dig->dig_encoder == 0) { WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); @@ -317,10 +317,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ } - } else { - /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */ - WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | - AUDIO_DTO_MODULE(clock / 10)); } } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b9ee99258602..b1f990d0eaa1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); +int radeon_fence_wait_locked(struct radeon_fence *fence); int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); int radeon_fence_wait_any(struct radeon_device *rdev, @@ -548,17 +549,20 @@ struct radeon_semaphore { struct radeon_sa_bo *sa_bo; signed waiters; uint64_t gpu_addr; + struct radeon_fence *sync_to[RADEON_NUM_RINGS]; }; int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore); -void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); -void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); +void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, + struct radeon_fence *fence); int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, - int 
signaler, int waiter); + int waiting_ring); void radeon_semaphore_free(struct radeon_device *rdev, struct radeon_semaphore **semaphore, struct radeon_fence *fence); @@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); /* * GPU doorbell structures, functions & helpers */ +#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */ + struct radeon_doorbell { - u32 num_pages; - bool free[1024]; /* doorbell mmio */ - resource_size_t base; - resource_size_t size; - void __iomem *ptr; + resource_size_t base; + resource_size_t size; + u32 __iomem *ptr; + u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */ + unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)]; }; int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); @@ -765,7 +771,6 @@ struct radeon_ib { struct radeon_fence *fence; struct radeon_vm *vm; bool is_const_ib; - struct radeon_fence *sync_to[RADEON_NUM_RINGS]; struct radeon_semaphore *semaphore; }; @@ -799,8 +804,7 @@ struct radeon_ring { u32 pipe; u32 queue; struct radeon_bo *mqd_obj; - u32 doorbell_page_num; - u32 doorbell_offset; + u32 doorbell_index; unsigned wptr_offs; }; @@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); -void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib); int radeon_ib_pool_init(struct radeon_device *rdev); @@ -1638,7 +1641,7 @@ struct radeon_asic_ring { /* command emmit functions */ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); - void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, + bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); @@ -1979,6 +1982,7 @@ struct cik_asic { unsigned tile_config; uint32_t tile_mode_array[32]; + uint32_t macrotile_mode_array[16]; }; union radeon_asic_config { @@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); -u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset); -void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); +u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index); +void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); /* * Cast helper @@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); #define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) -#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset)) -#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v)) +#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index)) +#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) /* * Indirect registers accessor @@ -2706,10 +2710,10 @@ void radeon_vm_fence(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_fence *fence); uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); -int radeon_vm_bo_update_pte(struct 
radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, - struct ttm_mem_reg *mem); +int radeon_vm_bo_update(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo, + struct ttm_mem_reg *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 10f98c7742d8..98a9074b306b 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -369,7 +369,7 @@ int radeon_atif_handler(struct radeon_device *rdev, return NOTIFY_DONE; /* Check pending SBIOS requests */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); count = radeon_atif_get_sbios_requests(handle, &req); if (count <= 0) @@ -556,7 +556,7 @@ int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev) struct radeon_atcs *atcs = &rdev->atcs; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); if (!handle) return -EINVAL; @@ -596,7 +596,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev, u32 retry = 3; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); if (!handle) return -EINVAL; @@ -699,7 +699,7 @@ int radeon_acpi_init(struct radeon_device *rdev) int ret; /* Get the device handle */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + handle = ACPI_HANDLE(&rdev->pdev->dev); /* No need to proceed if we're sure that ATIF is not supported */ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 50853c0cb49d..e354ce94cdd1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = { .bandwidth_update = &dce8_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, + .set_backlight_level = &atombios_set_backlight_level, + .get_backlight_level = &atombios_get_backlight_level, .hdmi_enable = &evergreen_hdmi_enable, .hdmi_setmode = &evergreen_hdmi_setmode, }, @@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = { .bandwidth_update = &dce8_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, + .set_backlight_level = &atombios_set_backlight_level, + .get_backlight_level = &atombios_get_backlight_level, .hdmi_enable = &evergreen_hdmi_enable, .hdmi_setmode = &evergreen_hdmi_setmode, }, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2833ee3a613..c9fd97b58076 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void r100_semaphore_ring_emit(struct radeon_device *rdev, +bool r100_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); @@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p); int r600_dma_cs_parse(struct radeon_cs_parser *p); void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void 
r600_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); void r600_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); @@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, */ void cayman_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void cayman_uvd_semaphore_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait); void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); int cayman_init(struct radeon_device *rdev); void cayman_fini(struct radeon_device *rdev); @@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); void cik_sdma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); @@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); void cik_fence_compute_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void cik_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); @@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev); int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); -void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, +bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); @@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); /* uvd v3.1 */ -void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, +bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index f79ee184ffd5..5c39bf7c3d88 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2918,7 +2918,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev, mpll_param->dll_speed = args.ucDllSpeed; mpll_param->bwcntl = args.ucBWCntl; mpll_param->vco_mode = - (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK) ? 1 : 0; + (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK); mpll_param->yclk_sel = (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 
1 : 0; mpll_param->qdr = diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6153ec18943a..9d302eaeea15 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -8,8 +8,7 @@ */ #include <linux/vga_switcheroo.h> #include <linux/slab.h> -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> +#include <linux/acpi.h> #include <linux/pci.h> #include "radeon_acpi.h" @@ -447,7 +446,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) acpi_handle dhandle, atpx_handle; acpi_status status; - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; @@ -493,7 +492,7 @@ static int radeon_atpx_init(void) */ static int radeon_atpx_get_client_id(struct pci_dev *pdev) { - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; else return VGA_SWITCHEROO_DIS; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c155d6f3fa68..b3633d9a5317 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -185,7 +185,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) return false; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) continue; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 26ca223d12d6..0b366169d64d 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p) if (!p->relocs[i].robj) continue; - radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj); + radeon_semaphore_sync_to(p->ib.semaphore, + p->relocs[i].robj->tbo.sync_obj); } } @@ -359,13 +360,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, struct radeon_bo *bo; int r; - r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); + r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); if (r) { return r; } list_for_each_entry(lobj, &parser->validated, tv.head) { bo = lobj->bo; - r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); + r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem); if (r) { return r; } @@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, goto out; } radeon_cs_sync_rings(parser); - radeon_ib_sync_to(&parser->ib, vm->fence); - radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id( - rdev, vm, parser->ring)); + radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); + radeon_semaphore_sync_to(parser->ib.semaphore, + radeon_vm_grab_id(rdev, vm, parser->ring)); if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b9234c43f43d..39b033b441d2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) */ int radeon_doorbell_init(struct radeon_device *rdev) { - int i; - /* doorbell bar mapping */ rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); rdev->doorbell.size = pci_resource_len(rdev->pdev, 2); - /* limit to 4 MB for now 
*/ - if (rdev->doorbell.size > (4 * 1024 * 1024)) - rdev->doorbell.size = 4 * 1024 * 1024; + rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS); + if (rdev->doorbell.num_doorbells == 0) + return -EINVAL; - rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size); + rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32)); if (rdev->doorbell.ptr == NULL) { return -ENOMEM; } DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base); DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size); - rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE; + memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used)); - for (i = 0; i < rdev->doorbell.num_pages; i++) { - rdev->doorbell.free[i] = true; - } return 0; } @@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev) } /** - * radeon_doorbell_get - Allocate a doorbell page + * radeon_doorbell_get - Allocate a doorbell entry * * @rdev: radeon_device pointer - * @doorbell: doorbell page number + * @doorbell: doorbell index * - * Allocate a doorbell page for use by the driver (all asics). + * Allocate a doorbell for use by the driver (all asics). * Returns 0 on success or -EINVAL on failure. */ int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell) { - int i; - - for (i = 0; i < rdev->doorbell.num_pages; i++) { - if (rdev->doorbell.free[i]) { - rdev->doorbell.free[i] = false; - *doorbell = i; - return 0; - } + unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells); + if (offset < rdev->doorbell.num_doorbells) { + __set_bit(offset, rdev->doorbell.used); + *doorbell = offset; + return 0; + } else { + return -EINVAL; } - return -EINVAL; } /** - * radeon_doorbell_free - Free a doorbell page + * radeon_doorbell_free - Free a doorbell entry * * @rdev: radeon_device pointer - * @doorbell: doorbell page number + * @doorbell: doorbell index * - * Free a doorbell page allocated for use by the driver (all asics) + * Free a doorbell allocated for use by the driver (all asics) */ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell) { - if (doorbell < rdev->doorbell.num_pages) - rdev->doorbell.free[doorbell] = true; + if (doorbell < rdev->doorbell.num_doorbells) + __clear_bit(doorbell, rdev->doorbell.used); } /* diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 1aee32213f66..9f5ff28864f6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -76,9 +76,10 @@ * 2.32.0 - new info request for rings working * 2.33.0 - Add SI tiling mode array query * 2.34.0 - Add CIK tiling mode array query + * 2.35.0 - Add CIK macrotile mode array query */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 34 +#define KMS_DRIVER_MINOR 35 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 543dcfae7e6f..00e0d449021c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -108,9 +108,10 @@ * 1.31- Add support for num Z pipes from GET_PARAM * 1.32- fixes for rv740 setup * 1.33- Add r6xx/r7xx const buffer support + * 1.34- fix evergreen/cayman GS register */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 33 +#define DRIVER_MINOR 34 #define DRIVER_PATCHLEVEL 0 long 
radeon_drm_ioctl(struct file *filp, diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 281d14c22a47..d3a86e43c012 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -472,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev, } /** + * radeon_fence_wait_locked - wait for a fence to signal + * + * @fence: radeon fence object + * + * Wait for the requested fence to signal (all asics). + * Returns 0 if the fence has passed, error for all other cases. + */ +int radeon_fence_wait_locked(struct radeon_fence *fence) +{ + uint64_t seq[RADEON_NUM_RINGS] = {}; + int r; + + if (fence == NULL) { + WARN(1, "Querying an invalid fence : %p !\n", fence); + return -EINVAL; + } + + seq[fence->ring] = fence->seq; + if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) + return 0; + + r = radeon_fence_wait_seq(fence->rdev, seq, false, false); + if (r) + return r; + + fence->seq = RADEON_FENCE_SIGNALED_SEQ; + return 0; +} + +/** * radeon_fence_wait_next_locked - wait for the next fence to signal * * @rdev: radeon device pointer diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 8a83b89d4709..96e440061bdb 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -29,6 +29,7 @@ #include <drm/radeon_drm.h> #include "radeon.h" #include "radeon_reg.h" +#include "radeon_trace.h" /* * GART @@ -651,7 +652,7 @@ retry: radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr, 0, pd_entries, 0, 0); - radeon_ib_sync_to(&ib, vm->fence); + radeon_semaphore_sync_to(ib.semaphore, vm->fence); r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { radeon_ib_free(rdev, &ib); @@ -737,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, for (i = 0; i < 2; ++i) { if (choices[i]) { vm->id = choices[i]; + trace_radeon_vm_grab_id(vm->id, ring); return rdev->vm_manager.active[choices[i]]; } } @@ -1116,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, } /** - * radeon_vm_bo_update_pte - map a bo into the vm page table + * radeon_vm_bo_update - map a bo into the vm page table * * @rdev: radeon_device pointer * @vm: requested vm @@ -1128,10 +1130,10 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, * * Object have to be reserved & global and local mutex must be locked! 
*/ -int radeon_vm_bo_update_pte(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, - struct ttm_mem_reg *mem) +int radeon_vm_bo_update(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo, + struct ttm_mem_reg *mem) { struct radeon_ib ib; struct radeon_bo_va *bo_va; @@ -1176,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, bo_va->valid = false; } + trace_radeon_vm_bo_update(bo_va); + nptes = radeon_bo_ngpu_pages(bo); /* assume two extra pdes in case the mapping overlaps the borders */ @@ -1209,6 +1213,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, return -ENOMEM; r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); + if (r) + return r; ib.length_dw = 0; r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); @@ -1220,7 +1226,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, addr, radeon_vm_page_flags(bo_va->flags)); - radeon_ib_sync_to(&ib, vm->fence); + radeon_semaphore_sync_to(ib.semaphore, vm->fence); r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { radeon_ib_free(rdev, &ib); @@ -1255,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, mutex_lock(&rdev->vm_manager.lock); mutex_lock(&bo_va->vm->mutex); if (bo_va->soffset) { - r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); + r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); } mutex_unlock(&rdev->vm_manager.lock); list_del(&bo_va->vm_list); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index bb8710531a1b..55d0b474bd37 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) break; case RADEON_INFO_BACKEND_MAP: if (rdev->family >= CHIP_BONAIRE) - return -EINVAL; + *value = rdev->config.cik.backend_map; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.backend_map; else if (rdev->family >= CHIP_CAYMAN) @@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; + case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: + if (rdev->family >= CHIP_BONAIRE) { + value = rdev->config.cik.macrotile_mode_array; + value_size = sizeof(uint32_t)*16; + } else { + DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n"); + return -EINVAL; + } + break; case RADEON_INFO_SI_CP_DMA_COMPUTE: *value = 1; break; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 0c7b8c66301b..0b158f98d287 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; rbo = gem_to_radeon_bo(obj); +retry: r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; @@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, &base); if (unlikely(r != 0)) { radeon_bo_unreserve(rbo); + + /* On old GPU like RN50 with little vram pining can fails because + * current fb is taking all space needed. So instead of unpining + * the old buffer after pining the new one, first unpin old one + * and then retry pining new one. 
+ * + * As only master can set mode only master can pin and it is + * unlikely the master client will race with itself especialy + * on those old gpu with single crtc. + * + * We don't shutdown the display controller because new buffer + * will end up in same spot. + */ + if (!atomic && fb && fb != crtc->fb) { + struct radeon_bo *old_rbo; + unsigned long nsize, osize; + + old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj); + osize = radeon_bo_size(old_rbo); + nsize = radeon_bo_size(rbo); + if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) { + radeon_bo_unpin(old_rbo); + radeon_bo_unreserve(old_rbo); + fb = NULL; + goto retry; + } + } return -EINVAL; } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 866ace070b91..984097b907ef 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -537,8 +537,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); int temp; if (rdev->asic->pm.get_temperature) @@ -553,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); int hyst = to_sensor_dev_attr(attr)->index; int temp; @@ -566,23 +564,14 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", temp); } -static ssize_t radeon_hwmon_show_name(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "radeon\n"); -} - static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); -static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_name.dev_attr.attr, NULL }; @@ -590,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = container_of(kobj, struct device, kobj); - struct drm_device *ddev = dev_get_drvdata(dev); - struct radeon_device *rdev = ddev->dev_private; + struct radeon_device *rdev = dev_get_drvdata(dev); /* Skip limit attributes if DPM is not enabled */ if (rdev->pm.pm_method != PM_METHOD_DPM && @@ -607,11 +595,15 @@ static const struct attribute_group hwmon_attrgroup = { .is_visible = hwmon_attributes_visible, }; +static const struct attribute_group *hwmon_groups[] = { + &hwmon_attrgroup, + NULL +}; + static int radeon_hwmon_init(struct radeon_device *rdev) { int err = 0; - - rdev->pm.int_hwmon_dev = NULL; + struct device *hwmon_dev; switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_RV6XX: @@ -624,20 +616,13 @@ static int radeon_hwmon_init(struct radeon_device *rdev) case THERMAL_TYPE_KV: if (rdev->asic->pm.get_temperature == NULL) return err; - rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); - if (IS_ERR(rdev->pm.int_hwmon_dev)) { - err = 
PTR_ERR(rdev->pm.int_hwmon_dev); + hwmon_dev = hwmon_device_register_with_groups(rdev->dev, + "radeon", rdev, + hwmon_groups); + if (IS_ERR(hwmon_dev)) { + err = PTR_ERR(hwmon_dev); dev_err(rdev->dev, "Unable to register hwmon device: %d\n", err); - break; - } - dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); - err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, - &hwmon_attrgroup); - if (err) { - dev_err(rdev->dev, - "Unable to create hwmon sysfs file: %d\n", err); - hwmon_device_unregister(rdev->dev); } break; default: @@ -647,14 +632,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev) return err; } -static void radeon_hwmon_fini(struct radeon_device *rdev) -{ - if (rdev->pm.int_hwmon_dev) { - sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); - hwmon_device_unregister(rdev->pm.int_hwmon_dev); - } -} - static void radeon_dpm_thermal_work_handler(struct work_struct *work) { struct radeon_device *rdev = @@ -1252,7 +1229,6 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_RS780: case CHIP_RS880: case CHIP_CAYMAN: - case CHIP_ARUBA: case CHIP_BONAIRE: case CHIP_KABINI: case CHIP_KAVERI: @@ -1284,6 +1260,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: + case CHIP_ARUBA: case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: @@ -1337,8 +1314,6 @@ static void radeon_pm_fini_old(struct radeon_device *rdev) if (rdev->pm.power_state) kfree(rdev->pm.power_state); - - radeon_hwmon_fini(rdev); } static void radeon_pm_fini_dpm(struct radeon_device *rdev) @@ -1358,8 +1333,6 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev) if (rdev->pm.power_state) kfree(rdev->pm.power_state); - - radeon_hwmon_fini(rdev); } void radeon_pm_fini(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 18254e1c3e71..9214403ae173 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size) { - int i, r; + int r; r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); if (r) { @@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); } ib->is_const_ib = false; - for (i = 0; i < RADEON_NUM_RINGS; ++i) - ib->sync_to[i] = NULL; return 0; } @@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) } /** - * radeon_ib_sync_to - sync to fence before executing the IB - * - * @ib: IB object to add fence to - * @fence: fence to sync to - * - * Sync to the fence before executing the IB - */ -void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence) -{ - struct radeon_fence *other; - - if (!fence) - return; - - other = ib->sync_to[fence->ring]; - ib->sync_to[fence->ring] = radeon_fence_later(fence, other); -} - -/** * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring * * @rdev: radeon_device pointer @@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; - bool need_sync = false; - int i, r = 0; + int r = 0; if (!ib->length_dw || !ring->ready) { /* TODO: Nothings in the ib we should report. 
*/ @@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; } - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - struct radeon_fence *fence = ib->sync_to[i]; - if (radeon_fence_need_sync(fence, ib->ring)) { - need_sync = true; - radeon_semaphore_sync_rings(rdev, ib->semaphore, - fence->ring, ib->ring); - radeon_fence_note_sync(fence, ib->ring); - } - } - /* immediately free semaphore when we don't need to sync */ - if (!need_sync) { - radeon_semaphore_free(rdev, &ib->semaphore, NULL); + + /* sync with other rings */ + r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); + if (r) { + dev_err(rdev->dev, "failed to sync rings (%d)\n", r); + radeon_ring_unlock_undo(rdev, ring); + return r; } + /* if we can't remember our last VM flush then flush now! */ /* XXX figure out why we have to flush for every IB */ if (ib->vm /*&& !ib->vm->last_flush*/) { diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 8dcc20f53d73..2b42aa1914f2 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -29,12 +29,12 @@ */ #include <drm/drmP.h> #include "radeon.h" - +#include "radeon_trace.h" int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { - int r; + int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); if (*semaphore == NULL) { @@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev, (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; + + for (i = 0; i < RADEON_NUM_RINGS; ++i) + (*semaphore)->sync_to[i] = NULL; + return 0; } -void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx, struct radeon_semaphore *semaphore) { - --semaphore->waiters; - radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false); + struct radeon_ring *ring = &rdev->ring[ridx]; + + trace_radeon_semaphore_signale(ridx, semaphore); + + if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) { + --semaphore->waiters; + + /* for debugging lockup only, used by sysfs debug files */ + ring->last_semaphore_signal_addr = semaphore->gpu_addr; + return true; + } + return false; } -void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx, struct radeon_semaphore *semaphore) { - ++semaphore->waiters; - radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); + struct radeon_ring *ring = &rdev->ring[ridx]; + + trace_radeon_semaphore_wait(ridx, semaphore); + + if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) { + ++semaphore->waiters; + + /* for debugging lockup only, used by sysfs debug files */ + ring->last_semaphore_wait_addr = semaphore->gpu_addr; + return true; + } + return false; +} + +/** + * radeon_semaphore_sync_to - use the semaphore to sync to a fence + * + * @semaphore: semaphore object to add fence to + * @fence: fence to sync to + * + * Sync to the fence using this semaphore object + */ +void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, + struct radeon_fence *fence) +{ + struct radeon_fence *other; + + if (!fence) + return; + + other = semaphore->sync_to[fence->ring]; + 
semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other); } -/* caller must hold ring lock */ +/** + * radeon_semaphore_sync_rings - sync ring to all registered fences + * + * @rdev: radeon_device pointer + * @semaphore: semaphore object to use for sync + * @ring: ring that needs sync + * + * Ensure that all registered fences are signaled before letting + * the ring continue. The caller must hold the ring lock. + */ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, - int signaler, int waiter) + int ring) { - int r; + int i, r; - /* no need to signal and wait on the same ring */ - if (signaler == waiter) { - return 0; - } + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + struct radeon_fence *fence = semaphore->sync_to[i]; - /* prevent GPU deadlocks */ - if (!rdev->ring[signaler].ready) { - dev_err(rdev->dev, "Trying to sync to a disabled ring!"); - return -EINVAL; - } + /* check if we really need to sync */ + if (!radeon_fence_need_sync(fence, ring)) + continue; - r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8); - if (r) { - return r; - } - radeon_semaphore_emit_signal(rdev, signaler, semaphore); - radeon_ring_commit(rdev, &rdev->ring[signaler]); + /* prevent GPU deadlocks */ + if (!rdev->ring[i].ready) { + dev_err(rdev->dev, "Syncing to a disabled ring!"); + return -EINVAL; + } - /* we assume caller has already allocated space on waiters ring */ - radeon_semaphore_emit_wait(rdev, waiter, semaphore); + /* allocate enough space for sync command */ + r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); + if (r) { + return r; + } - /* for debugging lockup only, used by sysfs debug files */ - rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; - rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; + /* emit the signal semaphore */ + if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { + /* signaling wasn't successful wait manually */ + radeon_ring_undo(&rdev->ring[i]); + radeon_fence_wait_locked(fence); + continue; + } + + /* we assume caller has already allocated space on waiters ring */ + if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { + /* waiting wasn't successful wait manually */ + radeon_ring_undo(&rdev->ring[i]); + radeon_fence_wait_locked(fence); + continue; + } + + radeon_ring_commit(rdev, &rdev->ring[i]); + radeon_fence_note_sync(fence, ring); + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index 811bca691b36..0473257d4078 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -47,6 +47,39 @@ TRACE_EVENT(radeon_cs, __entry->fences) ); +TRACE_EVENT(radeon_vm_grab_id, + TP_PROTO(unsigned vmid, int ring), + TP_ARGS(vmid, ring), + TP_STRUCT__entry( + __field(u32, vmid) + __field(u32, ring) + ), + + TP_fast_assign( + __entry->vmid = vmid; + __entry->ring = ring; + ), + TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) +); + +TRACE_EVENT(radeon_vm_bo_update, + TP_PROTO(struct radeon_bo_va *bo_va), + TP_ARGS(bo_va), + TP_STRUCT__entry( + __field(u64, soffset) + __field(u64, eoffset) + __field(u32, flags) + ), + + TP_fast_assign( + __entry->soffset = bo_va->soffset; + __entry->eoffset = bo_va->eoffset; + __entry->flags = bo_va->flags; + ), + TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", + __entry->soffset, __entry->eoffset, __entry->flags) +); + TRACE_EVENT(radeon_vm_set_page, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags), @@ -111,6 
+144,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, TP_ARGS(dev, seqno) ); +DECLARE_EVENT_CLASS(radeon_semaphore_request, + + TP_PROTO(int ring, struct radeon_semaphore *sem), + + TP_ARGS(ring, sem), + + TP_STRUCT__entry( + __field(int, ring) + __field(signed, waiters) + __field(uint64_t, gpu_addr) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->waiters = sem->waiters; + __entry->gpu_addr = sem->gpu_addr; + ), + + TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, + __entry->waiters, __entry->gpu_addr) +); + +DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale, + + TP_PROTO(int ring, struct radeon_semaphore *sem), + + TP_ARGS(ring, sem) +); + +DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait, + + TP_PROTO(int ring, struct radeon_semaphore *sem), + + TP_ARGS(ring, sem) +); + #endif /* This part must be outside protection */ diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index a072fa8c46b0..d46b58d078aa 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -21,7 +21,7 @@ cayman 0x9400 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 0x000089B0 VGT_HS_OFFCHIP_PARAM 0x00008A14 PA_CL_ENHANCE -0x00008A60 PA_SC_LINE_STIPPLE_VALUE +0x00008A60 PA_SU_LINE_STIPPLE_VALUE 0x00008B10 PA_SC_LINE_STIPPLE_STATE 0x00008BF0 PA_SC_ENHANCE 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ @@ -532,7 +532,7 @@ cayman 0x9400 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET -0x00028B74 VGT_GS_INSTANCE_CNT +0x00028B90 VGT_GS_INSTANCE_CNT 0x00028BD4 PA_SC_CENTROID_PRIORITY_0 0x00028BD8 PA_SC_CENTROID_PRIORITY_1 0x00028BDC PA_SC_LINE_CNTL diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index b912a37689bf..57745c8761c8 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -22,7 +22,7 @@ evergreen 0x9400 0x000089A4 VGT_COMPUTE_START_Z 0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE 0x00008A14 PA_CL_ENHANCE -0x00008A60 PA_SC_LINE_STIPPLE_VALUE +0x00008A60 PA_SU_LINE_STIPPLE_VALUE 0x00008B10 PA_SC_LINE_STIPPLE_STATE 0x00008BF0 PA_SC_ENHANCE 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ @@ -545,7 +545,7 @@ evergreen 0x9400 0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE 0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET -0x00028B74 VGT_GS_INSTANCE_CNT +0x00028B90 VGT_GS_INSTANCE_CNT 0x00028C00 PA_SC_LINE_CNTL 0x00028C08 PA_SU_VTX_CNTL 0x00028C0C PA_CL_GB_VERT_CLIP_ADJ diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index f9b02e3d6830..aca8cbe8a335 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6a64ccaa0695..a36736dab5e0 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3882,8 +3882,15 @@ static int si_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = 
pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* size in MB on si */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + tmp = RREG32(CONFIG_MEMSIZE); + /* some boards may have garbage in the upper 16 bits */ + if (tmp & 0xffff0000) { + DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); + if (tmp & 0xffff) + tmp &= 0xffff; + } + rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = rdev->mc.mc_vram_size; rdev->mc.visible_vram_size = rdev->mc.aper_size; si_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 8e8f46133532..59be2cfcbb47 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 9364129ba292..d700698a1f22 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev) pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; pi->enable_gfx_clock_gating = true; - pi->enable_mg_clock_gating = true; - pi->enable_gfx_dynamic_mgpg = true; /* ??? */ - pi->override_dynamic_mgpg = true; + pi->enable_mg_clock_gating = false; + pi->enable_gfx_dynamic_mgpg = false; + pi->override_dynamic_mgpg = false; pi->enable_auto_thermal_throttling = true; pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */ pi->uvd_dpm = true; /* ??? */ diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 7266805d9786..d4a68af1a279 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) * * Emit a semaphore command (either wait or signal) to the UVD ring. */ -void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, +bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); radeon_ring_write(ring, emit_wait ? 1 : 0); + + return true; } /** diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c index 5b6fa1f62d4e..d722db2cf340 100644 --- a/drivers/gpu/drm/radeon/uvd_v3_1.c +++ b/drivers/gpu/drm/radeon/uvd_v3_1.c @@ -37,7 +37,7 @@ * * Emit a semaphore command (either wait or signal) to the UVD ring. */ -void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, +bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); radeon_ring_write(ring, 0x80 | (emit_wait ? 
1 : 0)); + + return true; } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 28e178137718..07eba596d458 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -135,11 +135,11 @@ int tegra_drm_submit(struct tegra_drm_context *context, unsigned int num_relocs = args->num_relocs; unsigned int num_waitchks = args->num_waitchks; struct drm_tegra_cmdbuf __user *cmdbufs = - (void * __user)(uintptr_t)args->cmdbufs; + (void __user *)(uintptr_t)args->cmdbufs; struct drm_tegra_reloc __user *relocs = - (void * __user)(uintptr_t)args->relocs; + (void __user *)(uintptr_t)args->relocs; struct drm_tegra_waitchk __user *waitchks = - (void * __user)(uintptr_t)args->waitchks; + (void __user *)(uintptr_t)args->waitchks; struct drm_tegra_syncpt syncpt; struct host1x_job *job; int err; @@ -163,9 +163,10 @@ int tegra_drm_submit(struct tegra_drm_context *context, struct drm_tegra_cmdbuf cmdbuf; struct host1x_bo *bo; - err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf)); - if (err) + if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) { + err = -EFAULT; goto fail; + } bo = host1x_bo_lookup(drm, file, cmdbuf.handle); if (!bo) { @@ -178,10 +179,11 @@ int tegra_drm_submit(struct tegra_drm_context *context, cmdbufs++; } - err = copy_from_user(job->relocarray, relocs, - sizeof(*relocs) * num_relocs); - if (err) + if (copy_from_user(job->relocarray, relocs, + sizeof(*relocs) * num_relocs)) { + err = -EFAULT; goto fail; + } while (num_relocs--) { struct host1x_reloc *reloc = &job->relocarray[num_relocs]; @@ -199,15 +201,17 @@ int tegra_drm_submit(struct tegra_drm_context *context, } } - err = copy_from_user(job->waitchk, waitchks, - sizeof(*waitchks) * num_waitchks); - if (err) + if (copy_from_user(job->waitchk, waitchks, + sizeof(*waitchks) * num_waitchks)) { + err = -EFAULT; goto fail; + } - err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts, - sizeof(syncpt)); - if (err) + if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts, + sizeof(syncpt))) { + err = -EFAULT; goto fail; + } job->is_addr_reg = context->client->ops->is_addr_reg; job->syncpt_incrs = syncpt.incrs; @@ -573,7 +577,7 @@ static void tegra_debugfs_cleanup(struct drm_minor *minor) } #endif -struct drm_driver tegra_drm_driver = { +static struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, .load = tegra_drm_load, .unload = tegra_drm_unload, diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index fdfe259ed7f8..7da0b923131f 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -116,7 +116,7 @@ host1x_client_to_dc(struct host1x_client *client) static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) { - return container_of(crtc, struct tegra_dc, base); + return crtc ? 
container_of(crtc, struct tegra_dc, base) : NULL; } static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 490f7719e317..a3835e7de184 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -247,7 +247,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, info->var.yoffset * fb->pitches[0]; drm->mode_config.fb_base = (resource_size_t)bo->paddr; - info->screen_base = bo->vaddr + offset; + info->screen_base = (void __iomem *)bo->vaddr + offset; info->screen_size = size; info->fix.smem_start = (unsigned long)(bo->paddr + offset); info->fix.smem_len = size; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index ba47ca4fb880..3b29018913a5 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -14,6 +14,8 @@ struct tegra_rgb { struct tegra_output output; + struct tegra_dc *dc; + struct clk *clk_parent; struct clk *clk; }; @@ -84,18 +86,18 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, static int tegra_output_rgb_enable(struct tegra_output *output) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); + struct tegra_rgb *rgb = to_rgb(output); - tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); + tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); return 0; } static int tegra_output_rgb_disable(struct tegra_output *output) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); + struct tegra_rgb *rgb = to_rgb(output); - tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); + tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); return 0; } @@ -146,6 +148,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) rgb->output.dev = dc->dev; rgb->output.of_node = np; + rgb->dc = dc; err = tegra_output_probe(&rgb->output); if (err < 0) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d5a646ebe6a..07e02c4bf5a8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref) atomic_dec(&bo->glob->bo_count); if (bo->resv == &bo->ttm_resv) reservation_object_fini(&bo->ttm_resv); - + mutex_destroy(&bo->wu_mutex); if (bo->destroy) bo->destroy(bo); else { @@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); INIT_LIST_HEAD(&bo->io_reserve_lru); + mutex_init(&bo->wu_mutex); bo->bdev = bdev; bo->glob = bdev->glob; bo->type = type; @@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev) ; } EXPORT_SYMBOL(ttm_bo_swapout_all); + +/** + * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become + * unreserved + * + * @bo: Pointer to buffer + */ +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) +{ + int ret; + + /* + * In the absense of a wait_unlocked API, + * Use the bo::wu_mutex to avoid triggering livelocks due to + * concurrent use of this function. Note that this use of + * bo::wu_mutex can go away if we change locking order to + * mmap_sem -> bo::reserve. 
+ */ + ret = mutex_lock_interruptible(&bo->wu_mutex); + if (unlikely(ret != 0)) + return -ERESTARTSYS; + if (!ww_mutex_is_locked(&bo->resv->lock)) + goto out_unlock; + ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL); + if (unlikely(ret != 0)) + goto out_unlock; + ww_mutex_unlock(&bo->resv->lock); + +out_unlock: + mutex_unlock(&bo->wu_mutex); + return ret; +} diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 4834c463c38b..15b86a94949d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, goto out2; /* - * Move nonexistent data. NOP. + * Don't move nonexistent data. Clear destination instead. */ - if (old_iomap == NULL && ttm == NULL) + if (old_iomap == NULL && + (ttm == NULL || ttm->state == tt_unpopulated)) { + memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; + } /* * TTM might be null for moves within the same region. diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index ac617f3ecd0c..b249ab9b1eb2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* * Work around locking order reversal in fault / nopfn * between mmap_sem and bo_reserve: Perform a trylock operation - * for reserve, and if it fails, retry the fault after scheduling. + * for reserve, and if it fails, retry the fault after waiting + * for the buffer to become unreserved. */ - - ret = ttm_bo_reserve(bo, true, true, false, 0); + ret = ttm_bo_reserve(bo, true, true, false, NULL); if (unlikely(ret != 0)) { - if (ret == -EBUSY) - set_need_resched(); + if (ret != -EBUSY) + return VM_FAULT_NOPAGE; + + if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { + up_read(&vma->vm_mm->mmap_sem); + (void) ttm_bo_wait_unreserved(bo); + } + + return VM_FAULT_RETRY; + } + + /* + * If we'd want to change locking order to + * mmap_sem -> bo::reserve, we'd use a blocking reserve here + * instead of retrying the fault... 
+ */ return VM_FAULT_NOPAGE; } @@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) case 0: break; case -EBUSY: - set_need_resched(); case -ERESTARTSYS: retval = VM_FAULT_NOPAGE; goto out_unlock; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 6c911789ae5c..479e9418e3d7 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,8 +32,7 @@ #include <linux/sched.h> #include <linux/module.h> -static void ttm_eu_backoff_reservation_locked(struct list_head *list, - struct ww_acquire_ctx *ticket) +static void ttm_eu_backoff_reservation_locked(struct list_head *list) { struct ttm_validate_buffer *entry; @@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list, ticket); - ww_acquire_fini(ticket); + ttm_eu_backoff_reservation_locked(list); + if (ticket) + ww_acquire_fini(ticket); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_eu_backoff_reservation); @@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; - ww_acquire_init(ticket, &reservation_ww_class); + if (ticket) + ww_acquire_init(ticket, &reservation_ww_class); retry: list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; @@ -139,16 +140,17 @@ retry: if (entry->reserved) continue; - - ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); + ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true, + ticket); if (ret == -EDEADLK) { /* uh oh, we lost out, drop every reservation and try * to only reserve this buffer, then start over if * this succeeds. */ + BUG_ON(ticket == NULL); spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list, ticket); + ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, @@ -175,7 +177,8 @@ retry: } } - ww_acquire_done(ticket); + if (ticket) + ww_acquire_done(ticket); spin_lock(&glob->lru_lock); ttm_eu_del_from_lru_locked(list); spin_unlock(&glob->lru_lock); @@ -184,12 +187,14 @@ retry: err: spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list, ticket); + ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); err_fini: - ww_acquire_done(ticket); - ww_acquire_fini(ticket); + if (ticket) { + ww_acquire_done(ticket); + ww_acquire_fini(ticket); + } return ret; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); @@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, } spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); - ww_acquire_fini(ticket); + if (ticket) + ww_acquire_fini(ticket); list_for_each_entry(entry, list, head) { if (entry->old_sync_obj) diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index a868176c258a..6fe7b92a82d1 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,6 +26,12 @@ **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + * + * While no substantial code is shared, the prime code is inspired by + * drm_prime.c, with + * Authors: + * Dave Airlie <airlied@redhat.com> + * Rob Clark <rob.clark@linaro.org> */ /** @file ttm_ref_object.c * @@ -34,6 +40,7 @@ * and release on file close. */ + /** * struct ttm_object_file * @@ -84,6 +91,9 @@ struct ttm_object_device { struct drm_open_hash object_hash; atomic_t object_count; struct ttm_mem_global *mem_glob; + struct dma_buf_ops ops; + void (*dmabuf_release)(struct dma_buf *dma_buf); + size_t dma_buf_size; }; /** @@ -116,6 +126,8 @@ struct ttm_ref_object { struct ttm_object_file *tfile; }; +static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf); + static inline struct ttm_object_file * ttm_object_file_ref(struct ttm_object_file *tfile) { @@ -416,9 +428,10 @@ out_err: } EXPORT_SYMBOL(ttm_object_file_init); -struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global - *mem_glob, - unsigned int hash_order) +struct ttm_object_device * +ttm_object_device_init(struct ttm_mem_global *mem_glob, + unsigned int hash_order, + const struct dma_buf_ops *ops) { struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); int ret; @@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global spin_lock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); ret = drm_ht_create(&tdev->object_hash, hash_order); + if (ret != 0) + goto out_no_object_hash; - if (likely(ret == 0)) - return tdev; + tdev->ops = *ops; + tdev->dmabuf_release = tdev->ops.release; + tdev->ops.release = ttm_prime_dmabuf_release; + tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + + ttm_round_pot(sizeof(struct file)); + return tdev; +out_no_object_hash: kfree(tdev); return NULL; } @@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) kfree(tdev); } EXPORT_SYMBOL(ttm_object_device_release); + +/** + * get_dma_buf_unless_doomed - get a dma_buf reference if possible. + * + * @dma_buf: Non-refcounted pointer to a struct dma-buf. + * + * Obtain a file reference from a lookup structure that doesn't refcount + * the file, but synchronizes with its release method to make sure it has + * not been freed yet. See for example kref_get_unless_zero documentation. + * Returns true if refcounting succeeds, false otherwise. + * + * Nobody really wants this as a public API yet, so let it mature here + * for some time... + */ +static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf) +{ + return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L; +} + +/** + * ttm_prime_refcount_release - refcount release method for a prime object. + * + * @p_base: Pointer to ttm_base_object pointer. + * + * This is a wrapper that calls the refcount_release founction of the + * underlying object. At the same time it cleans up the prime object. + * This function is called when all references to the base object we + * derive from are gone. 
+ */ +static void ttm_prime_refcount_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct ttm_prime_object *prime; + + *p_base = NULL; + prime = container_of(base, struct ttm_prime_object, base); + BUG_ON(prime->dma_buf != NULL); + mutex_destroy(&prime->mutex); + if (prime->refcount_release) + prime->refcount_release(&base); +} + +/** + * ttm_prime_dmabuf_release - Release method for the dma-bufs we export + * + * @dma_buf: + * + * This function first calls the dma_buf release method the driver + * provides. Then it cleans up our dma_buf pointer used for lookup, + * and finally releases the reference the dma_buf has on our base + * object. + */ +static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) +{ + struct ttm_prime_object *prime = + (struct ttm_prime_object *) dma_buf->priv; + struct ttm_base_object *base = &prime->base; + struct ttm_object_device *tdev = base->tfile->tdev; + + if (tdev->dmabuf_release) + tdev->dmabuf_release(dma_buf); + mutex_lock(&prime->mutex); + if (prime->dma_buf == dma_buf) + prime->dma_buf = NULL; + mutex_unlock(&prime->mutex); + ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size); + ttm_base_object_unref(&base); +} + +/** + * ttm_prime_fd_to_handle - Get a base object handle from a prime fd + * + * @tfile: A struct ttm_object_file identifying the caller. + * @fd: The prime / dmabuf fd. + * @handle: The returned handle. + * + * This function returns a handle to an object that previously exported + * a dma-buf. Note that we don't handle imports yet, because we simply + * have no consumers of that implementation. + */ +int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, + int fd, u32 *handle) +{ + struct ttm_object_device *tdev = tfile->tdev; + struct dma_buf *dma_buf; + struct ttm_prime_object *prime; + struct ttm_base_object *base; + int ret; + + dma_buf = dma_buf_get(fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + if (dma_buf->ops != &tdev->ops) + return -ENOSYS; + + prime = (struct ttm_prime_object *) dma_buf->priv; + base = &prime->base; + *handle = base->hash.key; + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + + dma_buf_put(dma_buf); + + return ret; +} +EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle); + +/** + * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object + * + * @tfile: Struct ttm_object_file identifying the caller. + * @handle: Handle to the object we're exporting from. + * @flags: flags for dma-buf creation. We just pass them on. + * @prime_fd: The returned file descriptor. + * + */ +int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, + uint32_t handle, uint32_t flags, + int *prime_fd) +{ + struct ttm_object_device *tdev = tfile->tdev; + struct ttm_base_object *base; + struct dma_buf *dma_buf; + struct ttm_prime_object *prime; + int ret; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL || + base->object_type != ttm_prime_type)) { + ret = -ENOENT; + goto out_unref; + } + + prime = container_of(base, struct ttm_prime_object, base); + if (unlikely(!base->shareable)) { + ret = -EPERM; + goto out_unref; + } + + ret = mutex_lock_interruptible(&prime->mutex); + if (unlikely(ret != 0)) { + ret = -ERESTARTSYS; + goto out_unref; + } + + dma_buf = prime->dma_buf; + if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { + + /* + * Need to create a new dma_buf, with memory accounting. 
+ */ + ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size, + false, true); + if (unlikely(ret != 0)) { + mutex_unlock(&prime->mutex); + goto out_unref; + } + + dma_buf = dma_buf_export(prime, &tdev->ops, + prime->size, flags); + if (IS_ERR(dma_buf)) { + ret = PTR_ERR(dma_buf); + ttm_mem_global_free(tdev->mem_glob, + tdev->dma_buf_size); + mutex_unlock(&prime->mutex); + goto out_unref; + } + + /* + * dma_buf has taken the base object reference + */ + base = NULL; + prime->dma_buf = dma_buf; + } + mutex_unlock(&prime->mutex); + + ret = dma_buf_fd(dma_buf, flags); + if (ret >= 0) { + *prime_fd = ret; + ret = 0; + } else + dma_buf_put(dma_buf); + +out_unref: + if (base) + ttm_base_object_unref(&base); + return ret; +} +EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd); + +/** + * ttm_prime_object_init - Initialize a ttm_prime_object + * + * @tfile: struct ttm_object_file identifying the caller + * @size: The size of the dma_bufs we export. + * @prime: The object to be initialized. + * @shareable: See ttm_base_object_init + * @type: See ttm_base_object_init + * @refcount_release: See ttm_base_object_init + * @ref_obj_release: See ttm_base_object_init + * + * Initializes an object which is compatible with the drm_prime model + * for data sharing between processes and devices. + */ +int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, + struct ttm_prime_object *prime, bool shareable, + enum ttm_object_type type, + void (*refcount_release) (struct ttm_base_object **), + void (*ref_obj_release) (struct ttm_base_object *, + enum ttm_ref_type ref_type)) +{ + mutex_init(&prime->mutex); + prime->size = PAGE_ALIGN(size); + prime->real_type = type; + prime->dma_buf = NULL; + prime->refcount_release = refcount_release; + return ttm_base_object_init(tfile, &prime->base, shareable, + ttm_prime_type, + ttm_prime_refcount_release, + ref_obj_release); +} +EXPORT_SYMBOL(ttm_prime_object_init); diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 24ffbe990736..8d67b943ac05 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -125,6 +125,12 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) static void udl_gem_put_pages(struct udl_gem_object *obj) { + if (obj->base.import_attach) { + drm_free_large(obj->pages); + obj->pages = NULL; + return; + } + drm_gem_put_pages(&obj->base, obj->pages, false, false); obj->pages = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 2cc6cd91ac11..9f8b690bcf52 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ - vmwgfx_surface.o + vmwgfx_surface.o vmwgfx_prime.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 7776e6f0aef6..0489c6152482 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -150,6 +150,8 @@ struct vmw_ttm_tt { bool mapped; }; +const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); + /** * Helper functions to advance a struct vmw_piter iterator. 
* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 20d5485eaf98..c7a549694e59 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } dev_priv->tdev = ttm_object_device_init - (dev_priv->mem_global_ref.object, 12); + (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { DRM_ERROR("Unable to initialize TTM object management.\n"); @@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | - DRIVER_MODESET, + DRIVER_MODESET | DRIVER_PRIME, .load = vmw_driver_load, .unload = vmw_driver_unload, .lastclose = vmw_lastclose, @@ -1235,6 +1235,9 @@ static struct drm_driver driver = { .dumb_map_offset = vmw_dumb_map_offset, .dumb_destroy = vmw_dumb_destroy, + .prime_fd_to_handle = vmw_prime_fd_to_handle, + .prime_handle_to_fd = vmw_prime_handle_to_fd, + .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e401d5dbcb96..20890ad8408b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); * TTM buffer object driver - vmwgfx_buffer.c */ +extern const size_t vmw_tt_size; extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_ne_placement; extern struct ttm_placement vmw_vram_sys_placement; @@ -819,6 +820,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; /** + * Prime - vmwgfx_prime.c + */ + +extern const struct dma_buf_ops vmw_prime_dmabuf_ops; +extern int vmw_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, + int fd, u32 *handle); +extern int vmw_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t handle, uint32_t flags, + int *prime_fd); + + +/** * Inline helper functions */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index ecb3d867b426..03f1c2038631 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du) vmw_surface_unreference(&du->cursor_surface); if (du->cursor_dmabuf) vmw_dmabuf_unreference(&du->cursor_dmabuf); + drm_sysfs_connector_remove(&du->connector); drm_crtc_cleanup(&du->crtc); drm_encoder_cleanup(&du->encoder); drm_connector_cleanup(&du->connector); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 79f7e8e60529..a055a26819c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) connector->encoder = NULL; encoder->crtc = NULL; crtc->fb = NULL; + crtc->enabled = false; vmw_ldu_del_active(dev_priv, ldu); @@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) crtc->x = set->x; crtc->y = set->y; crtc->mode = *mode; + crtc->enabled = true; vmw_ldu_add_active(dev_priv, ldu, vfb); @@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); 
encoder->possible_clones = 0; + (void) drm_sysfs_connector_add(connector); + drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); drm_mode_crtc_set_gamma_size(crtc, 256); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c new file mode 100644 index 000000000000..31fe32d8d65a --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c @@ -0,0 +1,137 @@ +/************************************************************************** + * + * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: + * Thomas Hellstrom <thellstrom@vmware.com> + * + */ + +#include "vmwgfx_drv.h" +#include <linux/dma-buf.h> +#include <drm/ttm/ttm_object.h> + +/* + * DMA-BUF attach- and mapping methods. No need to implement + * these until we have other virtual devices use them. + */ + +static int vmw_prime_map_attach(struct dma_buf *dma_buf, + struct device *target_dev, + struct dma_buf_attachment *attach) +{ + return -ENOSYS; +} + +static void vmw_prime_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ +} + +static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + return ERR_PTR(-ENOSYS); +} + +static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgb, + enum dma_data_direction dir) +{ +} + +static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf) +{ + return NULL; +} + +static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ +} + +static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf, + unsigned long page_num) +{ + return NULL; +} + +static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, + unsigned long page_num, void *addr) +{ + +} +static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf, + unsigned long page_num) +{ + return NULL; +} + +static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf, + unsigned long page_num, void *addr) +{ + +} + +static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf, + struct vm_area_struct *vma) +{ + WARN_ONCE(true, "Attempted use of dmabuf mmap. 
Bad.\n"); + return -ENOSYS; +} + +const struct dma_buf_ops vmw_prime_dmabuf_ops = { + .attach = vmw_prime_map_attach, + .detach = vmw_prime_map_detach, + .map_dma_buf = vmw_prime_map_dma_buf, + .unmap_dma_buf = vmw_prime_unmap_dma_buf, + .release = NULL, + .kmap = vmw_prime_dmabuf_kmap, + .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, + .kunmap = vmw_prime_dmabuf_kunmap, + .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic, + .mmap = vmw_prime_dmabuf_mmap, + .vmap = vmw_prime_dmabuf_vmap, + .vunmap = vmw_prime_dmabuf_vunmap, +}; + +int vmw_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, + int fd, u32 *handle) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_prime_fd_to_handle(tfile, fd, handle); +} + +int vmw_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t handle, uint32_t flags, + int *prime_fd) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 252501a54def..9b5ea2ac7ddf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -35,7 +35,7 @@ #define VMW_RES_EVICT_ERR_COUNT 10 struct vmw_user_dma_buffer { - struct ttm_base_object base; + struct ttm_prime_object prime; struct vmw_dma_buffer dma; }; @@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, if (unlikely(base == NULL)) return -EINVAL; - if (unlikely(base->object_type != converter->object_type)) + if (unlikely(ttm_base_object_type(base) != converter->object_type)) goto out_bad_resource; res = converter->base_obj_to_res(base); @@ -352,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, /** * Buffer management. */ + +/** + * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers + * + * @dev_priv: Pointer to a struct vmw_private identifying the device. + * @size: The requested buffer size. + * @user: Whether this is an ordinary dma buffer or a user dma buffer. + */ +static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size, + bool user) +{ + static size_t struct_size, user_struct_size; + size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); + + if (unlikely(struct_size == 0)) { + size_t backend_size = ttm_round_pot(vmw_tt_size); + + struct_size = backend_size + + ttm_round_pot(sizeof(struct vmw_dma_buffer)); + user_struct_size = backend_size + + ttm_round_pot(sizeof(struct vmw_user_dma_buffer)); + } + + if (dev_priv->map_mode == vmw_dma_alloc_coherent) + page_array_size += + ttm_round_pot(num_pages * sizeof(dma_addr_t)); + + return ((user) ? 
user_struct_size : struct_size) + + page_array_size; +} + void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) { struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); @@ -359,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) kfree(vmw_bo); } +static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) +{ + struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); + + ttm_prime_object_kfree(vmw_user_bo, prime); +} + int vmw_dmabuf_init(struct vmw_private *dev_priv, struct vmw_dma_buffer *vmw_bo, size_t size, struct ttm_placement *placement, @@ -368,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, struct ttm_bo_device *bdev = &dev_priv->bdev; size_t acc_size; int ret; + bool user = (bo_free == &vmw_user_dmabuf_destroy); - BUG_ON(!bo_free); + BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free))); - acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); + acc_size = vmw_dmabuf_acc_size(dev_priv, size, user); memset(vmw_bo, 0, sizeof(*vmw_bo)); INIT_LIST_HEAD(&vmw_bo->res_list); ret = ttm_bo_init(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, + (user) ? ttm_bo_type_device : + ttm_bo_type_kernel, placement, 0, interruptible, NULL, acc_size, NULL, bo_free); return ret; } -static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) -{ - struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); - - ttm_base_object_kfree(vmw_user_bo, base); -} - static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) { struct vmw_user_dma_buffer *vmw_user_bo; @@ -401,7 +435,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) if (unlikely(base == NULL)) return; - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, + prime.base); bo = &vmw_user_bo->dma.base; ttm_bo_unref(&bo); } @@ -442,18 +477,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, return ret; tmp = ttm_bo_reference(&user_bo->dma.base); - ret = ttm_base_object_init(tfile, - &user_bo->base, - shareable, - ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + ret = ttm_prime_object_init(tfile, + size, + &user_bo->prime, + shareable, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); if (unlikely(ret != 0)) { ttm_bo_unref(&tmp); goto out_no_base_object; } *p_dma_buf = &user_bo->dma; - *handle = user_bo->base.hash.key; + *handle = user_bo->prime.base.hash.key; out_no_base_object: return ret; @@ -475,8 +511,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, return -EPERM; vmw_user_bo = vmw_user_dma_buffer(bo); - return (vmw_user_bo->base.tfile == tfile || - vmw_user_bo->base.shareable) ? 0 : -EPERM; + return (vmw_user_bo->prime.base.tfile == tfile || + vmw_user_bo->prime.base.shareable) ? 
0 : -EPERM; } int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, @@ -538,14 +574,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, return -ESRCH; } - if (unlikely(base->object_type != ttm_buffer_type)) { + if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { ttm_base_object_unref(&base); printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", (unsigned long)handle); return -EINVAL; } - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, + prime.base); (void)ttm_bo_reference(&vmw_user_bo->dma.base); ttm_base_object_unref(&base); *out = &vmw_user_bo->dma; @@ -562,7 +599,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, return -EINVAL; user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); - return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); + return ttm_ref_object_add(tfile, &user_bo->prime.base, + TTM_REF_USAGE, NULL); } /* @@ -777,53 +815,55 @@ err_ref: } +/** + * vmw_dumb_create - Create a dumb kms buffer + * + * @file_priv: Pointer to a struct drm_file identifying the caller. + * @dev: Pointer to the drm device. + * @args: Pointer to a struct drm_mode_create_dumb structure + * + * This is a driver callback for the core drm create_dumb functionality. + * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except + * that the arguments have a different format. + */ int vmw_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_master *vmaster = vmw_master(file_priv->master); - struct vmw_user_dma_buffer *vmw_user_bo; - struct ttm_buffer_object *tmp; + struct vmw_dma_buffer *dma_buf; int ret; args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); - if (vmw_user_bo == NULL) - return -ENOMEM; - ret = ttm_read_lock(&vmaster->lock, true); - if (ret != 0) { - kfree(vmw_user_bo); + if (unlikely(ret != 0)) return ret; - } - ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, - &vmw_vram_sys_placement, true, - &vmw_user_dmabuf_destroy); - if (ret != 0) - goto out_no_dmabuf; - - tmp = ttm_bo_reference(&vmw_user_bo->dma.base); - ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, - &vmw_user_bo->base, - false, - ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, + args->size, false, &args->handle, + &dma_buf); if (unlikely(ret != 0)) - goto out_no_base_object; - - args->handle = vmw_user_bo->base.hash.key; + goto out_no_dmabuf; -out_no_base_object: - ttm_bo_unref(&tmp); + vmw_dmabuf_unreference(&dma_buf); out_no_dmabuf: ttm_read_unlock(&vmaster->lock); return ret; } +/** + * vmw_dumb_map_offset - Return the address space offset of a dumb buffer + * + * @file_priv: Pointer to a struct drm_file identifying the caller. + * @dev: Pointer to the drm device. + * @handle: Handle identifying the dumb buffer. + * @offset: The address space offset returned. + * + * This is a driver callback for the core drm dumb_map_offset functionality. + */ int vmw_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset) @@ -841,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv, return 0; } +/** + * vmw_dumb_destroy - Destroy a dumb boffer + * + * @file_priv: Pointer to a struct drm_file identifying the caller. 
+ * @dev: Pointer to the drm device. + * @handle: Handle identifying the dumb buffer. + * + * This is a driver callback for the core drm dumb_destroy functionality. + */ int vmw_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle) @@ -994,7 +1043,6 @@ void vmw_resource_unreserve(struct vmw_resource *res, */ static int vmw_resource_check_buffer(struct vmw_resource *res, - struct ww_acquire_ctx *ticket, bool interruptible, struct ttm_validate_buffer *val_buf) { @@ -1011,7 +1059,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, INIT_LIST_HEAD(&val_list); val_buf->bo = ttm_bo_reference(&res->backup->base); list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(ticket, &val_list); + ret = ttm_eu_reserve_buffers(NULL, &val_list); if (unlikely(ret != 0)) goto out_no_reserve; @@ -1029,7 +1077,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, return 0; out_no_validate: - ttm_eu_backoff_reservation(ticket, &val_list); + ttm_eu_backoff_reservation(NULL, &val_list); out_no_reserve: ttm_bo_unref(&val_buf->bo); if (backup_dirty) @@ -1074,8 +1122,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) * @val_buf: Backup buffer information. */ static void -vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, - struct ttm_validate_buffer *val_buf) +vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) { struct list_head val_list; @@ -1084,7 +1131,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, INIT_LIST_HEAD(&val_list); list_add_tail(&val_buf->head, &val_list); - ttm_eu_backoff_reservation(ticket, &val_list); + ttm_eu_backoff_reservation(NULL, &val_list); ttm_bo_unref(&val_buf->bo); } @@ -1099,14 +1146,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) { struct ttm_validate_buffer val_buf; const struct vmw_res_func *func = res->func; - struct ww_acquire_ctx ticket; int ret; BUG_ON(!func->may_evict); val_buf.bo = NULL; - ret = vmw_resource_check_buffer(res, &ticket, interruptible, - &val_buf); + ret = vmw_resource_check_buffer(res, interruptible, &val_buf); if (unlikely(ret != 0)) return ret; @@ -1121,7 +1166,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) res->backup_dirty = true; res->res_dirty = false; out_no_unbind: - vmw_resource_backoff_reservation(&ticket, &val_buf); + vmw_resource_backoff_reservation(&val_buf); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 26387c3d5a21..22406c8651ea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) crtc->fb = NULL; crtc->x = 0; crtc->y = 0; + crtc->enabled = false; vmw_sou_del_active(dev_priv, sou); @@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) crtc->fb = NULL; crtc->x = 0; crtc->y = 0; + crtc->enabled = false; return ret; } @@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) crtc->fb = fb; crtc->x = set->x; crtc->y = set->y; + crtc->enabled = true; return 0; } @@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; + (void) drm_sysfs_connector_add(connector); + drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); drm_mode_crtc_set_gamma_size(crtc, 256); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 582814339748..7de2ea8bd553 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -38,7 +38,7 @@ * @size: TTM accounting size for the surface. */ struct vmw_user_surface { - struct ttm_base_object base; + struct ttm_prime_object prime; struct vmw_surface srf; uint32_t size; uint32_t backup_handle; @@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv, static struct vmw_resource * vmw_user_surface_base_to_res(struct ttm_base_object *base) { - return &(container_of(base, struct vmw_user_surface, base)->srf.res); + return &(container_of(base, struct vmw_user_surface, + prime.base)->srf.res); } /** @@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) kfree(srf->offsets); kfree(srf->sizes); kfree(srf->snooper.image); - ttm_base_object_kfree(user_srf, base); + ttm_prime_object_kfree(user_srf, prime); ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } @@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct vmw_user_surface *user_srf = - container_of(base, struct vmw_user_surface, base); + container_of(base, struct vmw_user_surface, prime.base); struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; @@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, } srf->snooper.crtc = NULL; - user_srf->base.shareable = false; - user_srf->base.tfile = NULL; + user_srf->prime.base.shareable = false; + user_srf->prime.base.tfile = NULL; /** * From this point, the generic resource management functions @@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, goto out_unlock; tmp = vmw_resource_reference(&srf->res); - ret = ttm_base_object_init(tfile, &user_srf->base, - req->shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); + ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, + req->shareable, VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - rep->sid = user_srf->base.hash.key; + rep->sid = user_srf->prime.base.hash.key; vmw_resource_unreference(&res); ttm_read_unlock(&vmaster->lock); @@ -823,7 +824,7 @@ out_no_copy: out_no_offsets: kfree(srf->sizes); out_no_sizes: - ttm_base_object_kfree(user_srf, base); + ttm_prime_object_kfree(user_srf, prime); out_no_user_srf: ttm_mem_global_free(vmw_mem_glob(dev_priv), size); out_unlock: @@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (unlikely(base->object_type != VMW_RES_SURFACE)) + if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) goto out_bad_resource; - user_srf = container_of(base, struct vmw_user_surface, base); + user_srf = container_of(base, struct vmw_user_surface, prime.base); srf = &user_srf->srf; - ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, &user_srf->prime.base, + TTM_REF_USAGE, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_no_reference; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 509383f8be03..6a929591aa73 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -19,6 +19,7 @@ #include <linux/of.h> #include <linux/slab.h> 
+#include "bus.h" #include "dev.h" static DEFINE_MUTEX(clients_lock); @@ -257,7 +258,7 @@ static int host1x_unregister_client(struct host1x *host1x, return -ENODEV; } -struct bus_type host1x_bus_type = { +static struct bus_type host1x_bus_type = { .name = "host1x", }; @@ -301,7 +302,7 @@ static int host1x_device_add(struct host1x *host1x, device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; device->dev.dma_mask = &device->dev.coherent_dma_mask; device->dev.release = host1x_device_release; - dev_set_name(&device->dev, driver->name); + dev_set_name(&device->dev, "%s", driver->name); device->dev.bus = &host1x_bus_type; device->dev.parent = host1x->dev; diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c index 37e2a63241a9..6b09b71940c2 100644 --- a/drivers/gpu/host1x/hw/cdma_hw.c +++ b/drivers/gpu/host1x/hw/cdma_hw.c @@ -54,8 +54,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr, u32 *p = (u32 *)((u32)pb->mapped + getptr); *(p++) = HOST1X_OPCODE_NOP; *(p++) = HOST1X_OPCODE_NOP; - dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, - pb->phys + getptr); + dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__, + (u64)pb->phys + getptr); getptr = (getptr + 8) & (pb->size_bytes - 1); } wmb(); diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c index 640c75ca5a8b..f72c873eff81 100644 --- a/drivers/gpu/host1x/hw/debug_hw.c +++ b/drivers/gpu/host1x/hw/debug_hw.c @@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) continue; } - host1x_debug_output(o, " GATHER at %08x+%04x, %d words\n", - g->base, g->offset, g->words); + host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n", + (u64)g->base, g->offset, g->words); show_gather(o, g->base + g->offset, g->words, cdma, g->base, mapped); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 329fbb9b5976..34e2d39d4ce8 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -460,6 +460,7 @@ config HID_MULTITOUCH - Stantum multitouch panels - Touch International Panels - Unitec Panels + - Wistron optical touch panels - XAT optical touch panels - Xiroku optical touch panels - Zytronic touch panels diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c index a42e6a394c5e..0e6a42d37eb6 100644 --- a/drivers/hid/hid-appleir.c +++ b/drivers/hid/hid-appleir.c @@ -297,6 +297,9 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id) appleir->hid = hid; + /* force input as some remotes bypass the input registration */ + hid->quirks |= HID_QUIRK_HIDINPUT_FORCE; + spin_lock_init(&appleir->lock); setup_timer(&appleir->key_up_timer, key_up_tick, (unsigned long) appleir); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8c10f2742233..253fe23ef7fe 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1723,6 +1723,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, @@ -1879,7 +1880,6 @@ static const struct hid_device_id 
hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, { } }; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 76559629568c..f9304cb37154 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -489,6 +489,7 @@ #define USB_VENDOR_ID_KYE 0x0458 #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138 +#define USB_DEVICE_ID_GENIUS_MANTICORE 0x0153 #define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 @@ -640,7 +641,6 @@ #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 #define USB_VENDOR_ID_NINTENDO 0x057e -#define USB_VENDOR_ID_NINTENDO2 0x054c #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 @@ -902,6 +902,9 @@ #define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802 #define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804 +#define USB_VENDOR_ID_WISTRON 0x0fb8 +#define USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH 0x1109 + #define USB_VENDOR_ID_X_TENSIONS 0x1ae7 #define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001 diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index 73845120295e..e77696367591 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -342,6 +342,10 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83, "Genius Gx Imperator Keyboard"); break; + case USB_DEVICE_ID_GENIUS_MANTICORE: + rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104, + "Genius Manticore Keyboard"); + break; } return rdesc; } @@ -418,6 +422,14 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) goto enabling_err; } break; + case USB_DEVICE_ID_GENIUS_MANTICORE: + /* + * The manticore keyboard needs to have all the interfaces + * opened at least once to be fully functional. 
+ */ + if (hid_hw_open(hdev)) + hid_hw_close(hdev); + break; } return 0; @@ -439,6 +451,8 @@ static const struct hid_device_id kye_devices[] = { USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_GENIUS_MANTICORE) }, { } }; MODULE_DEVICE_TABLE(hid, kye_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index a2cedb8ae1c0..d83b1e8b505b 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1335,6 +1335,12 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) }, + + /* Wistron panels */ + { .driver_data = MT_CLS_NSMU, + MT_USB_DEVICE(USB_VENDOR_ID_WISTRON, + USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) }, + /* XAT */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_XAT, diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index a184e1921c11..8fab82829f8b 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -112,13 +112,15 @@ static int sensor_hub_get_physical_device_count( static void sensor_hub_fill_attr_info( struct hid_sensor_hub_attribute_info *info, - s32 index, s32 report_id, s32 units, s32 unit_expo, s32 size) + s32 index, s32 report_id, struct hid_field *field) { info->index = index; info->report_id = report_id; - info->units = units; - info->unit_expo = unit_expo; - info->size = size/8; + info->units = field->unit; + info->unit_expo = field->unit_exponent; + info->size = (field->report_size * field->report_count)/8; + info->logical_minimum = field->logical_minimum; + info->logical_maximum = field->logical_maximum; } static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( @@ -325,9 +327,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, if (field->physical == usage_id && field->logical == attr_usage_id) { sensor_hub_fill_attr_info(info, i, report->id, - field->unit, field->unit_exponent, - field->report_size * - field->report_count); + field); ret = 0; } else { for (j = 0; j < field->maxusage; ++j) { @@ -336,11 +336,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, field->usage[j].collection_index == collection_index) { sensor_hub_fill_attr_info(info, - i, report->id, - field->unit, - field->unit_exponent, - field->report_size * - field->report_count); + i, report->id, field); ret = 0; break; } @@ -573,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev, goto err_free_names; } sd->hid_sensor_hub_client_devs[ + sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO; + sd->hid_sensor_hub_client_devs[ sd->hid_sensor_client_cnt].name = name; sd->hid_sensor_hub_client_devs[ sd->hid_sensor_client_cnt].platform_data = diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index da551d113762..098af2f84b8c 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -225,6 +225,13 @@ static const unsigned int buzz_keymap[] = { struct sony_sc { unsigned long quirks; +#ifdef CONFIG_SONY_FF + struct work_struct rumble_worker; + struct hid_device *hdev; + __u8 left; + __u8 right; +#endif + void *extra; }; @@ -615,9 +622,9 @@ static void buzz_remove(struct hid_device *hdev) } #ifdef CONFIG_SONY_FF -static int sony_play_effect(struct input_dev *dev, void *data, - struct ff_effect *effect) +static void sony_rumble_worker(struct work_struct *work) { + struct sony_sc *sc = 
container_of(work, struct sony_sc, rumble_worker); unsigned char buf[] = { 0x01, 0x00, 0xff, 0x00, 0xff, 0x00, @@ -628,21 +635,28 @@ static int sony_play_effect(struct input_dev *dev, void *data, 0xff, 0x27, 0x10, 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00 }; - __u8 left; - __u8 right; + + buf[3] = sc->right; + buf[5] = sc->left; + + sc->hdev->hid_output_raw_report(sc->hdev, buf, sizeof(buf), + HID_OUTPUT_REPORT); +} + +static int sony_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ struct hid_device *hid = input_get_drvdata(dev); + struct sony_sc *sc = hid_get_drvdata(hid); if (effect->type != FF_RUMBLE) return 0; - left = effect->u.rumble.strong_magnitude / 256; - right = effect->u.rumble.weak_magnitude ? 1 : 0; - - buf[3] = right; - buf[5] = left; + sc->left = effect->u.rumble.strong_magnitude / 256; + sc->right = effect->u.rumble.weak_magnitude ? 1 : 0; - return hid->hid_output_raw_report(hid, buf, sizeof(buf), - HID_OUTPUT_REPORT); + schedule_work(&sc->rumble_worker); + return 0; } static int sony_init_ff(struct hid_device *hdev) @@ -650,16 +664,31 @@ static int sony_init_ff(struct hid_device *hdev) struct hid_input *hidinput = list_entry(hdev->inputs.next, struct hid_input, list); struct input_dev *input_dev = hidinput->input; + struct sony_sc *sc = hid_get_drvdata(hdev); + + sc->hdev = hdev; + INIT_WORK(&sc->rumble_worker, sony_rumble_worker); input_set_capability(input_dev, EV_FF, FF_RUMBLE); return input_ff_create_memless(input_dev, NULL, sony_play_effect); } +static void sony_destroy_ff(struct hid_device *hdev) +{ + struct sony_sc *sc = hid_get_drvdata(hdev); + + cancel_work_sync(&sc->rumble_worker); +} + #else static int sony_init_ff(struct hid_device *hdev) { return 0; } + +static void sony_destroy_ff(struct hid_device *hdev) +{ +} #endif static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) @@ -728,6 +757,8 @@ static void sony_remove(struct hid_device *hdev) if (sc->quirks & BUZZ_CONTROLLER) buzz_remove(hdev); + sony_destroy_ff(hdev); + hid_hw_stop(hdev); } diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c index 1446f526ee8b..abb20db2b443 100644 --- a/drivers/hid/hid-wiimote-core.c +++ b/drivers/hid/hid-wiimote-core.c @@ -834,8 +834,7 @@ static void wiimote_init_set_type(struct wiimote_data *wdata, goto done; } - if (vendor == USB_VENDOR_ID_NINTENDO || - vendor == USB_VENDOR_ID_NINTENDO2) { + if (vendor == USB_VENDOR_ID_NINTENDO) { if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { devtype = WIIMOTE_DEV_GEN10; goto done; @@ -1856,8 +1855,6 @@ static void wiimote_hid_remove(struct hid_device *hdev) static const struct hid_device_id wiimote_hid_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, - USB_DEVICE_ID_NINTENDO_WIIMOTE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, { } diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index ae48d18ee315..5f7e55f4b7f0 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -1008,7 +1008,7 @@ static int i2c_hid_probe(struct i2c_client *client, hid->hid_get_raw_report = i2c_hid_get_raw_report; hid->hid_output_raw_report = i2c_hid_output_raw_report; hid->dev.parent = &client->dev; - ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev)); + ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev)); hid->bus = BUS_I2C; hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); hid->vendor = 
le16_to_cpu(ihid->hdesc.wVendorID); diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 93b00d76374c..cedc6da93c19 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -287,7 +287,7 @@ static int uhid_event_from_user(const char __user *buffer, size_t len, */ struct uhid_create_req_compat *compat; - compat = kmalloc(sizeof(*compat), GFP_KERNEL); + compat = kzalloc(sizeof(*compat), GFP_KERNEL); if (!compat) return -ENOMEM; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index b3ab9d43bb3e..52d548f1dc1d 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -656,6 +656,7 @@ config SENSORS_LM75 - Analog Devices ADT75 - Dallas Semiconductor DS75, DS1775 and DS7505 + - Global Mixed-mode Technology (GMT) G751 - Maxim MAX6625 and MAX6626 - Microchip MCP980x - National Semiconductor LM75, LM75A diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 8d40da314a8e..6a34f7f48eb9 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -602,9 +602,8 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource) /* Create a symlink to domain objects */ resource->domain_devices[i] = NULL; - status = acpi_bus_get_device(element->reference.handle, - &resource->domain_devices[i]); - if (ACPI_FAILURE(status)) + if (acpi_bus_get_device(element->reference.handle, + &resource->domain_devices[i])) continue; obj = resource->domain_devices[i]; diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 1d7ff46812c3..dafc63c6932d 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -18,7 +18,6 @@ #include <linux/err.h> #include <acpi/acpi.h> -#include <acpi/acpixf.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c index 2dc37c7c6947..7d68a08baaa8 100644 --- a/drivers/hwmon/hih6130.c +++ b/drivers/hwmon/hih6130.c @@ -43,6 +43,7 @@ * @last_update: time of last update (jiffies) * @temperature: cached temperature measurement value * @humidity: cached humidity measurement value + * @write_length: length for I2C measurement request */ struct hih6130 { struct device *hwmon_dev; @@ -51,6 +52,7 @@ struct hih6130 { unsigned long last_update; int temperature; int humidity; + size_t write_length; }; /** @@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client) */ if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) { - /* write to slave address, no data, to request a measurement */ - ret = i2c_master_send(client, tmp, 0); + /* + * Write to slave address to request a measurement. + * According with the datasheet it should be with no data, but + * for systems with I2C bus drivers that do not allow zero + * length packets we write one dummy byte to allow sensor + * measurements on them. 
+ */ + tmp[0] = 0; + ret = i2c_master_send(client, tmp, hih6130->write_length); if (ret < 0) goto out; @@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client, goto fail_remove_sysfs; } + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK)) + hih6130->write_length = 1; + return 0; fail_remove_sysfs: diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index c03b490bba81..7e3ef134f1d2 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */ ds1775, ds75, ds7505, + g751, lm75, lm75a, max6625, @@ -208,6 +209,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) data->resolution = 12; data->sample_time = HZ / 4; break; + case g751: case lm75: case lm75a: data->resolution = 9; @@ -296,6 +298,7 @@ static const struct i2c_device_id lm75_ids[] = { { "ds1775", ds1775, }, { "ds75", ds75, }, { "ds7505", ds7505, }, + { "g751", g751, }, { "lm75", lm75, }, { "lm75a", lm75a, }, { "max6625", max6625, }, diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c index 6cf6bff79003..a2f3b4a365e4 100644 --- a/drivers/hwmon/lm78.c +++ b/drivers/hwmon/lm78.c @@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm <= 0) return 255; + if (rpm > 1350000) + return 1; return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 4c4c1421bf28..8b8f3aa49726 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client, "lm90", client); if (err < 0) { dev_err(dev, "cannot request IRQ %d\n", client->irq); - goto exit_remove_files; + goto exit_unregister; } } return 0; +exit_unregister: + hwmon_device_unregister(data->hwmon_dev); exit_remove_files: lm90_remove_files(client, data); exit_restore: diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index d17325db0ea3..cf811c1a1475 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -274,6 +274,8 @@ static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; static const u16 NCT6775_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; +static const u16 NCT6775_REG_TEMP_MON[] = { 0x73, 0x75, 0x77 }; + static const u16 NCT6775_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = { 0, 0x152, 0x252, 0x628, 0x629, 0x62A }; static const u16 NCT6775_REG_TEMP_HYST[ARRAY_SIZE(NCT6775_REG_TEMP)] = { @@ -454,6 +456,7 @@ static const u16 NCT6779_REG_CRITICAL_PWM[] = { 0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 }; static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 }; +static const u16 NCT6779_REG_TEMP_MON[] = { 0x73, 0x75, 0x77, 0x79, 0x7b }; static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = { 0x18, 0x152 }; static const u16 NCT6779_REG_TEMP_HYST[ARRAY_SIZE(NCT6779_REG_TEMP)] = { @@ -507,6 +510,13 @@ static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1] #define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28 +static const u16 NCT6791_REG_WEIGHT_TEMP_SEL[6] = { 0, 0x239 }; +static const u16 NCT6791_REG_WEIGHT_TEMP_STEP[6] = { 0, 0x23a }; +static const u16 NCT6791_REG_WEIGHT_TEMP_STEP_TOL[6] = { 0, 0x23b }; +static const u16 NCT6791_REG_WEIGHT_DUTY_STEP[6] = { 0, 0x23c }; +static const u16 NCT6791_REG_WEIGHT_TEMP_BASE[6] = { 0, 0x23d }; +static const u16 NCT6791_REG_WEIGHT_DUTY_BASE[6] = { 0, 0x23e }; + static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = { 0x459, 0x45A, 0x45B, 0x568, 0x45D }; @@ -534,6 +544,7 @@ static const u16 
NCT6106_REG_IN[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 }; static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 }; +static const u16 NCT6106_REG_TEMP_MON[] = { 0x18, 0x19, 0x1a }; static const u16 NCT6106_REG_TEMP_HYST[] = { 0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 }; static const u16 NCT6106_REG_TEMP_OVER[] = { @@ -1307,6 +1318,9 @@ static void nct6775_update_pwm(struct device *dev) if (reg & 0x80) data->pwm[2][i] = 0; + if (!data->REG_WEIGHT_TEMP_SEL[i]) + continue; + reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); data->pwm_weight_temp_sel[i] = reg & 0x1f; /* If weight is disabled, report weight source as 0 */ @@ -2852,6 +2866,9 @@ static umode_t nct6775_pwm_is_visible(struct kobject *kobj, if (!(data->has_pwm & (1 << pwm))) return 0; + if ((nr >= 14 && nr <= 18) || nr == 21) /* weight */ + if (!data->REG_WEIGHT_TEMP_SEL[pwm]) + return 0; if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */ return 0; if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */ @@ -2945,11 +2962,11 @@ static struct sensor_device_template *nct6775_attributes_pwm_template[] = { &sensor_dev_template_pwm_step_down_time, &sensor_dev_template_pwm_start, &sensor_dev_template_pwm_floor, - &sensor_dev_template_pwm_weight_temp_sel, + &sensor_dev_template_pwm_weight_temp_sel, /* 14 */ &sensor_dev_template_pwm_weight_temp_step, &sensor_dev_template_pwm_weight_temp_step_tol, &sensor_dev_template_pwm_weight_temp_step_base, - &sensor_dev_template_pwm_weight_duty_step, + &sensor_dev_template_pwm_weight_duty_step, /* 18 */ &sensor_dev_template_pwm_max, /* 19 */ &sensor_dev_template_pwm_step, /* 20 */ &sensor_dev_template_pwm_weight_duty_base, /* 21 */ @@ -3253,9 +3270,9 @@ static int nct6775_probe(struct platform_device *pdev) int i, s, err = 0; int src, mask, available; const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config; - const u16 *reg_temp_alternate, *reg_temp_crit; + const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit; const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL; - int num_reg_temp; + int num_reg_temp, num_reg_temp_mon; u8 cr2a; struct attribute_group *group; struct device *hwmon_dev; @@ -3338,7 +3355,9 @@ static int nct6775_probe(struct platform_device *pdev) data->BEEP_BITS = NCT6106_BEEP_BITS; reg_temp = NCT6106_REG_TEMP; + reg_temp_mon = NCT6106_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON); reg_temp_over = NCT6106_REG_TEMP_OVER; reg_temp_hyst = NCT6106_REG_TEMP_HYST; reg_temp_config = NCT6106_REG_TEMP_CONFIG; @@ -3410,7 +3429,9 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_BEEP = NCT6775_REG_BEEP; reg_temp = NCT6775_REG_TEMP; + reg_temp_mon = NCT6775_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON); reg_temp_over = NCT6775_REG_TEMP_OVER; reg_temp_hyst = NCT6775_REG_TEMP_HYST; reg_temp_config = NCT6775_REG_TEMP_CONFIG; @@ -3480,7 +3501,9 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_BEEP = NCT6776_REG_BEEP; reg_temp = NCT6775_REG_TEMP; + reg_temp_mon = NCT6775_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON); reg_temp_over = NCT6775_REG_TEMP_OVER; reg_temp_hyst = NCT6775_REG_TEMP_HYST; reg_temp_config = NCT6776_REG_TEMP_CONFIG; @@ -3554,7 +3577,9 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_BEEP = NCT6776_REG_BEEP; reg_temp = NCT6779_REG_TEMP; + reg_temp_mon = 
NCT6779_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON); reg_temp_over = NCT6779_REG_TEMP_OVER; reg_temp_hyst = NCT6779_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; @@ -3603,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_PWM[0] = NCT6775_REG_PWM; data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP; - data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE; + data->REG_PWM[5] = NCT6791_REG_WEIGHT_DUTY_STEP; + data->REG_PWM[6] = NCT6791_REG_WEIGHT_DUTY_BASE; data->REG_PWM_READ = NCT6775_REG_PWM_READ; data->REG_PWM_MODE = NCT6776_REG_PWM_MODE; data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK; @@ -3620,15 +3645,17 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET; data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE; data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL; - data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL; - data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP; - data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL; - data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE; + data->REG_WEIGHT_TEMP_SEL = NCT6791_REG_WEIGHT_TEMP_SEL; + data->REG_WEIGHT_TEMP[0] = NCT6791_REG_WEIGHT_TEMP_STEP; + data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL; + data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE; data->REG_ALARM = NCT6791_REG_ALARM; data->REG_BEEP = NCT6776_REG_BEEP; reg_temp = NCT6779_REG_TEMP; + reg_temp_mon = NCT6779_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON); reg_temp_over = NCT6779_REG_TEMP_OVER; reg_temp_hyst = NCT6779_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; @@ -3729,6 +3756,50 @@ static int nct6775_probe(struct platform_device *pdev) s++; } + /* + * Repeat with temperatures used for fan control. + * This set of registers does not support limits. 
+ */ + for (i = 0; i < num_reg_temp_mon; i++) { + if (reg_temp_mon[i] == 0) + continue; + + src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f; + if (!src || (mask & (1 << src))) + continue; + + if (src >= data->temp_label_num || + !strlen(data->temp_label[src])) { + dev_info(dev, + "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n", + src, i, data->REG_TEMP_SEL[i], + reg_temp_mon[i]); + continue; + } + + mask |= 1 << src; + + /* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */ + if (src <= data->temp_fixed_num) { + if (data->have_temp & (1 << (src - 1))) + continue; + data->have_temp |= 1 << (src - 1); + data->have_temp_fixed |= 1 << (src - 1); + data->reg_temp[0][src - 1] = reg_temp_mon[i]; + data->temp_src[src - 1] = src; + continue; + } + + if (s >= NUM_TEMP) + continue; + + /* Use dynamic index for other sources */ + data->have_temp |= 1 << s; + data->reg_temp[0][s] = reg_temp_mon[i]; + data->temp_src[s] = src; + s++; + } + #ifdef USE_ALTERNATE /* * Go through the list of alternate temp registers and enable diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c index 1404e6319deb..72a889702f0d 100644 --- a/drivers/hwmon/sis5595.c +++ b/drivers/hwmon/sis5595.c @@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm <= 0) return 255; + if (rpm > 1350000) + return 1; return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c index 0e7017841f7d..aee14e2192f8 100644 --- a/drivers/hwmon/vt8231.c +++ b/drivers/hwmon/vt8231.c @@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 }; */ static inline u8 FAN_TO_REG(long rpm, int div) { - if (rpm == 0) + if (rpm <= 0 || rpm > 1310720) return 0; return clamp_val(1310720 / (rpm * div), 1, 255); } diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index edb06cda5a68..6ed76ceb9270 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr, if (err) return err; val = clamp_val(val, 0, 255); + val = DIV_ROUND_CLOSEST(val, 0x11); mutex_lock(&data->update_lock); - data->pwm[nr] = val; + data->pwm[nr] = val * 0x11; + val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0; w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val); mutex_unlock(&data->update_lock); return count; @@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG); data->pwm_enable[nr] = val; - reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]); + reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]); reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr]; w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg); mutex_unlock(&data->update_lock); @@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev) ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1) ? 
0 : 1; data->pwm_enable[i] = - ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1; - data->pwm[i] = w83l786ng_read_value(client, - W83L786NG_REG_PWM[i]); + ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1; + data->pwm[i] = + (w83l786ng_read_value(client, W83L786NG_REG_PWM[i]) + & 0x0f) * 0x11; } diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index 036cf03aeb61..18a74a6751a9 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -20,7 +20,6 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> -#include <linux/clk.h> #include <linux/slab.h> /* Hardware register offsets and field defintions */ @@ -891,7 +890,7 @@ static const struct of_device_id bcm_kona_i2c_of_match[] = { {.compatible = "brcm,kona-i2c",}, {}, }; -MODULE_DEVICE_TABLE(of, kona_i2c_of_match); +MODULE_DEVICE_TABLE(of, bcm_kona_i2c_of_match); static struct platform_driver bcm_kona_i2c_driver = { .driver = { diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index d7e8600f31fb..77df97b932af 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -299,6 +299,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name)); adap->algo = &bcm2835_i2c_algo; adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index ff05d9fef4a8..af0b5830303d 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -125,12 +125,12 @@ static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = { static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev, int reg, u16 val) { - __raw_writew(val, i2c_dev->base + reg); + writew_relaxed(val, i2c_dev->base + reg); } static inline u16 davinci_i2c_read_reg(struct davinci_i2c_dev *i2c_dev, int reg) { - return __raw_readw(i2c_dev->base + reg); + return readw_relaxed(i2c_dev->base + reg); } /* Generate a pulse on the i2c clock pin. 
*/ diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c index dae3ddfe7619..721f7ebf9a3b 100644 --- a/drivers/i2c/busses/i2c-diolan-u2c.c +++ b/drivers/i2c/busses/i2c-diolan-u2c.c @@ -25,8 +25,6 @@ #define USB_VENDOR_ID_DIOLAN 0x0abf #define USB_DEVICE_ID_DIOLAN_U2C 0x3370 -#define DIOLAN_OUT_EP 0x02 -#define DIOLAN_IN_EP 0x84 /* commands via USB, must match command ids in the firmware */ #define CMD_I2C_READ 0x01 @@ -84,6 +82,7 @@ struct i2c_diolan_u2c { u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ + int ep_in, ep_out; /* Endpoints */ struct usb_device *usb_dev; /* the usb device for this device */ struct usb_interface *interface;/* the interface for this device */ struct i2c_adapter adapter; /* i2c related things */ @@ -109,7 +108,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) return -EINVAL; ret = usb_bulk_msg(dev->usb_dev, - usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP), + usb_sndbulkpipe(dev->usb_dev, dev->ep_out), dev->obuffer, dev->olen, &actual, DIOLAN_USB_TIMEOUT); if (!ret) { @@ -118,7 +117,7 @@ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) tmpret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, - DIOLAN_IN_EP), + dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); @@ -210,7 +209,7 @@ static void diolan_flush_input(struct i2c_diolan_u2c *dev) int ret; ret = usb_bulk_msg(dev->usb_dev, - usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP), + usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); if (ret < 0 || actual == 0) @@ -445,9 +444,14 @@ static void diolan_u2c_free(struct i2c_diolan_u2c *dev) static int diolan_u2c_probe(struct usb_interface *interface, const struct usb_device_id *id) { + struct usb_host_interface *hostif = interface->cur_altsetting; struct i2c_diolan_u2c *dev; int ret; + if (hostif->desc.bInterfaceNumber != 0 + || hostif->desc.bNumEndpoints < 2) + return -ENODEV; + /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { @@ -455,6 +459,8 @@ static int diolan_u2c_probe(struct usb_interface *interface, ret = -ENOMEM; goto error; } + dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress; + dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress; dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 1d7efa3169cd..d0cfbb4cb964 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - clk_prepare_enable(i2c_imx->clk); + result = clk_prepare_enable(i2c_imx->clk); + if (result) + return result; imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); /* Enable I2C controller */ imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index a6a891d7970d..90dcc2eaac5f 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -266,13 +266,13 @@ static const u8 reg_map_ip_v2[] = { static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev, int reg, u16 val) { - __raw_writew(val, i2c_dev->base + + writew_relaxed(val, i2c_dev->base + (i2c_dev->regs[reg] << i2c_dev->reg_shift)); } static inline u16 omap_i2c_read_reg(struct 
omap_i2c_dev *i2c_dev, int reg) { - return __raw_readw(i2c_dev->base + + return readw_relaxed(i2c_dev->base + (i2c_dev->regs[reg] << i2c_dev->reg_shift)); } @@ -1037,6 +1037,20 @@ static const struct i2c_algorithm omap_i2c_algo = { }; #ifdef CONFIG_OF +static struct omap_i2c_bus_platform_data omap2420_pdata = { + .rev = OMAP_I2C_IP_VERSION_1, + .flags = OMAP_I2C_FLAG_NO_FIFO | + OMAP_I2C_FLAG_SIMPLE_CLOCK | + OMAP_I2C_FLAG_16BIT_DATA_REG | + OMAP_I2C_FLAG_BUS_SHIFT_2, +}; + +static struct omap_i2c_bus_platform_data omap2430_pdata = { + .rev = OMAP_I2C_IP_VERSION_1, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 | + OMAP_I2C_FLAG_FORCE_19200_INT_CLK, +}; + static struct omap_i2c_bus_platform_data omap3_pdata = { .rev = OMAP_I2C_IP_VERSION_1, .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, @@ -1055,6 +1069,14 @@ static const struct of_device_id omap_i2c_of_match[] = { .compatible = "ti,omap3-i2c", .data = &omap3_pdata, }, + { + .compatible = "ti,omap2430-i2c", + .data = &omap2430_pdata, + }, + { + .compatible = "ti,omap2420-i2c", + .data = &omap2420_pdata, + }, { }, }; MODULE_DEVICE_TABLE(of, omap_i2c_of_match); @@ -1140,9 +1162,9 @@ omap_i2c_probe(struct platform_device *pdev) * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2. * On omap1/3/2 Offset 4 is IE Reg the bit [15:14] is 0 at reset. * Also since the omap_i2c_read_reg uses reg_map_ip_* a - * raw_readw is done. + * readw_relaxed is done. */ - rev = __raw_readw(dev->base + 0x04); + rev = readw_relaxed(dev->base + 0x04); dev->scheme = OMAP_I2C_SCHEME(rev); switch (dev->scheme) { diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5923cfa390c8..d74c0b34248e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -615,6 +615,22 @@ void i2c_unlock_adapter(struct i2c_adapter *adapter) } EXPORT_SYMBOL_GPL(i2c_unlock_adapter); +static void i2c_dev_set_name(struct i2c_adapter *adap, + struct i2c_client *client) +{ + struct acpi_device *adev = ACPI_COMPANION(&client->dev); + + if (adev) { + dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev)); + return; + } + + /* For 10-bit clients, add an arbitrary offset to avoid collisions */ + dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), + client->addr | ((client->flags & I2C_CLIENT_TEN) + ? 0xa000 : 0)); +} + /** * i2c_new_device - instantiate an i2c device * @adap: the adapter managing the device @@ -671,12 +687,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; client->dev.of_node = info->of_node; - ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle); + ACPI_COMPANION_SET(&client->dev, info->acpi_node.companion); - /* For 10-bit clients, add an arbitrary offset to avoid collisions */ - dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), - client->addr | ((client->flags & I2C_CLIENT_TEN) - ? 
0xa000 : 0)); + i2c_dev_set_name(adap, client); status = device_register(&client->dev); if (status) goto out_err; @@ -1100,7 +1113,7 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, return AE_OK; memset(&info, 0, sizeof(info)); - info.acpi_node.handle = handle; + info.acpi_node.companion = adev; info.irq = -1; INIT_LIST_HEAD(&resource_list); diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 797e3117bef7..2d0847b6be62 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, priv->adap.algo = &priv->algo; priv->adap.algo_data = priv; priv->adap.dev.parent = &parent->dev; + priv->adap.retries = parent->retries; + priv->adap.timeout = parent->timeout; /* Sanity check on class */ if (i2c_mux_parent_classes(parent) & class) diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 140c8ef50529..d9e1f7ccfe6f 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -7,6 +7,7 @@ * Copyright (C) 2006 Hannes Reinecke */ +#include <linux/acpi.h> #include <linux/ata.h> #include <linux/delay.h> #include <linux/device.h> @@ -19,8 +20,6 @@ #include <linux/dmi.h> #include <linux/module.h> -#include <acpi/acpi_bus.h> - #define REGS_PER_GTF 7 struct GTM_buffer { @@ -128,7 +127,7 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle, DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func); - dev_handle = DEVICE_ACPI_HANDLE(dev); + dev_handle = ACPI_HANDLE(dev); if (!dev_handle) { DEBPRINT("no acpi handle for device\n"); goto err; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3226ce98fb18..92d1206482a6 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1,7 +1,7 @@ /* * intel_idle.c - native hardware idle loop for modern Intel processors * - * Copyright (c) 2010, Intel Corporation. + * Copyright (c) 2013, Intel Corporation. 
* Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it @@ -329,6 +329,22 @@ static struct cpuidle_state atom_cstates[] __initdata = { { .enter = NULL } }; +static struct cpuidle_state avn_cstates[] __initdata = { + { + .name = "C1-AVN", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle }, + { + .name = "C6-AVN", + .desc = "MWAIT 0x51", + .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 15, + .target_residency = 45, + .enter = &intel_idle }, +}; /** * intel_idle @@ -462,6 +478,11 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_avn = { + .state_table = avn_cstates, + .disable_promotion_to_c1e = true, +}; + #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } @@ -483,6 +504,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), ICPU(0x46, idle_cpu_hsw), + ICPU(0x4D, idle_cpu_avn), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index dcda17395c4e..1cae4e920c9b 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -350,7 +350,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev) error_iio_unreg: iio_device_unregister(indio_dev); error_remove_trigger: - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&accel_state->common_attributes); error_unreg_buffer_funcs: iio_triggered_buffer_cleanup(indio_dev); error_free_dev_mem: @@ -363,10 +363,11 @@ static int hid_accel_3d_remove(struct platform_device *pdev) { struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct accel_3d_state *accel_state = iio_priv(indio_dev); sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D); iio_device_unregister(indio_dev); - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&accel_state->common_attributes); iio_triggered_buffer_cleanup(indio_dev); kfree(indio_dev->channels); diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index d72118d1189c..98ba761cbb9c 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -112,9 +112,10 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address) mutex_lock(&st->buf_lock); st->tx[0] = KXSD9_READ(address); ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers)); - if (ret) - return ret; - return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0); + if (!ret) + ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0); + mutex_unlock(&st->buf_lock); + return ret; } static IIO_CONST_ATTR(accel_scale_available, diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 17df74908db1..5b1aa027c034 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -1047,6 +1047,7 @@ static int at91_adc_probe(struct platform_device *pdev) } else { if (!st->caps->has_tsmr) { dev_err(&pdev->dev, "We don't support non-TSMR adc\n"); + ret = -ENODEV; goto error_disable_adc_clk; } diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 12948325431c..c8c1baaec6c1 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -88,10 +88,10 @@ static const int mcp3422_sample_rates[4] 
= { /* sample rates to sign extension table */ static const int mcp3422_sign_extend[4] = { - [MCP3422_SRATE_240] = 12, - [MCP3422_SRATE_60] = 14, - [MCP3422_SRATE_15] = 16, - [MCP3422_SRATE_3] = 18 }; + [MCP3422_SRATE_240] = 11, + [MCP3422_SRATE_60] = 13, + [MCP3422_SRATE_15] = 15, + [MCP3422_SRATE_3] = 17 }; /* Client data (each client gets its own) */ struct mcp3422 { diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 728411ec7642..d4d748214e4b 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -229,12 +229,15 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev, unsigned long flags, const struct iio_buffer_setup_ops *setup_ops) { + struct iio_buffer *buffer; int ret; - indio_dev->buffer = iio_kfifo_allocate(indio_dev); - if (!indio_dev->buffer) + buffer = iio_kfifo_allocate(indio_dev); + if (!buffer) return -ENOMEM; + iio_device_attach_buffer(indio_dev, buffer); + ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh, flags, indio_dev->name, indio_dev); if (ret) diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig index 1178121b55b0..39188b72cd3b 100644 --- a/drivers/iio/common/hid-sensors/Kconfig +++ b/drivers/iio/common/hid-sensors/Kconfig @@ -25,13 +25,4 @@ config HID_SENSOR_IIO_TRIGGER If this driver is compiled as a module, it will be named hid-sensor-trigger. -config HID_SENSOR_ENUM_BASE_QUIRKS - bool "ENUM base quirks for HID Sensor IIO drivers" - depends on HID_SENSOR_IIO_COMMON - help - Say yes here to build support for sensor hub FW using - enumeration, which is using 1 as base instead of 0. - Since logical minimum is still set 0 instead of 1, - there is no easy way to differentiate. - endmenu diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index b6e77e0fc420..7dcf83998e6f 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -33,33 +33,42 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, { struct hid_sensor_common *st = iio_trigger_get_drvdata(trig); int state_val; + int report_val; if (state) { if (sensor_hub_device_open(st->hsdev)) return -EIO; - } else + state_val = + HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM; + report_val = + HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM; + + } else { sensor_hub_device_close(st->hsdev); + state_val = + HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM; + report_val = + HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM; + } - state_val = state ? 
1 : 0; - if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS)) - ++state_val; st->data_ready = state; + state_val += st->power_state.logical_minimum; + report_val += st->report_state.logical_minimum; sensor_hub_set_feature(st->hsdev, st->power_state.report_id, st->power_state.index, (s32)state_val); sensor_hub_set_feature(st->hsdev, st->report_state.report_id, st->report_state.index, - (s32)state_val); + (s32)report_val); return 0; } -void hid_sensor_remove_trigger(struct iio_dev *indio_dev) +void hid_sensor_remove_trigger(struct hid_sensor_common *attrb) { - iio_trigger_unregister(indio_dev->trig); - iio_trigger_free(indio_dev->trig); - indio_dev->trig = NULL; + iio_trigger_unregister(attrb->trigger); + iio_trigger_free(attrb->trigger); } EXPORT_SYMBOL(hid_sensor_remove_trigger); @@ -90,7 +99,7 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, dev_err(&indio_dev->dev, "Trigger Register Failed\n"); goto error_free_trig; } - indio_dev->trig = trig; + indio_dev->trig = attrb->trigger = trig; return ret; diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h index 9a8731478eda..ca02f7811aa8 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h @@ -21,6 +21,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, struct hid_sensor_common *attrb); -void hid_sensor_remove_trigger(struct iio_dev *indio_dev); +void hid_sensor_remove_trigger(struct hid_sensor_common *attrb); #endif diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c index ea01c6bcfb56..e54f0f4959d3 100644 --- a/drivers/iio/gyro/hid-sensor-gyro-3d.c +++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c @@ -348,7 +348,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev) error_iio_unreg: iio_device_unregister(indio_dev); error_remove_trigger: - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&gyro_state->common_attributes); error_unreg_buffer_funcs: iio_triggered_buffer_cleanup(indio_dev); error_free_dev_mem: @@ -361,10 +361,11 @@ static int hid_gyro_3d_remove(struct platform_device *pdev) { struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct gyro_3d_state *gyro_state = iio_priv(indio_dev); sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D); iio_device_unregister(indio_dev); - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&gyro_state->common_attributes); iio_triggered_buffer_cleanup(indio_dev); kfree(indio_dev->channels); diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index f98c2b509254..a022f27c6690 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -43,6 +43,7 @@ config GP2AP020A00F depends on I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER + select IRQ_WORK help Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip hooked to an I2C bus. @@ -81,6 +82,8 @@ config SENSORS_LM3533 config TCS3472 tristate "TAOS TCS3472 color light-to-digital converter" depends on I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help If you say yes here you get support for the TAOS TCS3472 family of color light-to-digital converters with IR filter. 
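The hid-sensor-trigger change above replaces the removed HID_SENSOR_ENUM_BASE_QUIRKS compile-time option with a runtime offset: the 0-based power/reporting enum values are biased by each feature field's logical_minimum before being written, so firmware that enumerates from 1 and firmware that enumerates from 0 are both handled without a quirk flag. A minimal sketch of that pattern follows; it assumes the struct hid_sensor_common fields and the four-argument sensor_hub_set_feature() visible in the hunk, and the helper name itself is hypothetical (the hunk does the equivalent inline in hid_sensor_data_rdy_trigger_set_state()).

static void hid_sensor_write_state_sketch(struct hid_sensor_common *st, bool on)
{
	/* 0-based usage enums from the HID sensor usage tables */
	s32 power = on ? HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM :
			 HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM;
	s32 report = on ? HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM :
			  HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM;

	/*
	 * Firmware that counts from 1 reports logical_minimum == 1, firmware
	 * that counts from 0 reports 0, so adding the field's logical minimum
	 * yields the correct on-the-wire value either way.
	 */
	power += st->power_state.logical_minimum;
	report += st->report_state.logical_minimum;

	sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
			       st->power_state.index, power);
	sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
			       st->report_state.index, report);
}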
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c index fa6ae8cf89ea..8e8b9d722853 100644 --- a/drivers/iio/light/hid-sensor-als.c +++ b/drivers/iio/light/hid-sensor-als.c @@ -314,7 +314,7 @@ static int hid_als_probe(struct platform_device *pdev) error_iio_unreg: iio_device_unregister(indio_dev); error_remove_trigger: - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&als_state->common_attributes); error_unreg_buffer_funcs: iio_triggered_buffer_cleanup(indio_dev); error_free_dev_mem: @@ -327,10 +327,11 @@ static int hid_als_remove(struct platform_device *pdev) { struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct als_state *als_state = iio_priv(indio_dev); sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS); iio_device_unregister(indio_dev); - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&als_state->common_attributes); iio_triggered_buffer_cleanup(indio_dev); kfree(indio_dev->channels); diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index 0cf09637b35b..d86d226dcd67 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -19,6 +19,8 @@ config AK8975 config MAG3110 tristate "Freescale MAG3110 3-Axis Magnetometer" depends on I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for the Freescale MAG3110 3-Axis magnetometer. diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c index 2634920562fb..b26e1028a0a0 100644 --- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c +++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c @@ -351,7 +351,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev) error_iio_unreg: iio_device_unregister(indio_dev); error_remove_trigger: - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&magn_state->common_attributes); error_unreg_buffer_funcs: iio_triggered_buffer_cleanup(indio_dev); error_free_dev_mem: @@ -364,10 +364,11 @@ static int hid_magn_3d_remove(struct platform_device *pdev) { struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct magn_3d_state *magn_state = iio_priv(indio_dev); sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D); iio_device_unregister(indio_dev); - hid_sensor_remove_trigger(indio_dev); + hid_sensor_remove_trigger(&magn_state->common_attributes); iio_triggered_buffer_cleanup(indio_dev); kfree(indio_dev->channels); diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index 783c5b417356..becf54496967 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -250,7 +250,12 @@ done: .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ BIT(IIO_CHAN_INFO_SCALE), \ .scan_index = idx, \ - .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ } static const struct iio_chan_spec mag3110_channels[] = { diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6df23502059a..6be57c38638d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -22,6 +22,7 @@ #include <linux/socket.h> #include <linux/in.h> #include <linux/in6.h> +#include <linux/llist.h> #include <rdma/ib_verbs.h> #include 
<rdma/rdma_cm.h> #include <target/target_core_base.h> @@ -489,6 +490,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) kref_init(&isert_conn->conn_kref); kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); + mutex_init(&isert_conn->conn_comp_mutex); spin_lock_init(&isert_conn->conn_lock); cma_id->context = isert_conn; @@ -843,14 +845,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, } static void -isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr) +isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + struct ib_send_wr *send_wr, bool coalesce) { + struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; + isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; send_wr->opcode = IB_WR_SEND; - send_wr->send_flags = IB_SEND_SIGNALED; - send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0]; + send_wr->sg_list = &tx_desc->tx_sg[0]; send_wr->num_sge = isert_cmd->tx_desc.num_sge; + /* + * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED + * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. + */ + mutex_lock(&isert_conn->conn_comp_mutex); + if (coalesce && + ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { + llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); + mutex_unlock(&isert_conn->conn_comp_mutex); + return; + } + isert_conn->conn_comp_batch = 0; + tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); + mutex_unlock(&isert_conn->conn_comp_mutex); + + send_wr->send_flags = IB_SEND_SIGNALED; } static int @@ -1582,8 +1602,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc, } static void -isert_send_completion(struct iser_tx_desc *tx_desc, - struct isert_conn *isert_conn) +__isert_send_completion(struct iser_tx_desc *tx_desc, + struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd = tx_desc->isert_cmd; @@ -1624,6 +1644,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } static void +isert_send_completion(struct iser_tx_desc *tx_desc, + struct isert_conn *isert_conn) +{ + struct llist_node *llnode = tx_desc->comp_llnode_batch; + struct iser_tx_desc *t; + /* + * Drain coalesced completion llist starting from comp_llnode_batch + * setup in isert_init_send_wr(), and then complete trailing tx_desc. 
+ */ + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + __isert_send_completion(t, isert_conn); + } + __isert_send_completion(tx_desc, isert_conn); +} + +static void isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; @@ -1793,7 +1831,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1813,7 +1851,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, &isert_cmd->tx_desc.iscsi_header, nopout_response); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1831,7 +1869,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1849,7 +1887,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1881,7 +1919,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) tx_dsg->lkey = isert_conn->conn_mr->lkey; isert_cmd->tx_desc.num_sge = 2; - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1921,7 +1959,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) tx_dsg->lkey = isert_conn->conn_mr->lkey; isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1991,8 +2029,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { data_left = se_cmd->data_length; - iscsit_increment_maxcmdsn(cmd, conn->sess); - cmd->stat_sn = conn->stat_sn++; } else { sg_off = cmd->write_data_done / PAGE_SIZE; data_left = se_cmd->data_length - cmd->write_data_done; @@ -2204,8 +2240,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { data_left = se_cmd->data_length; - iscsit_increment_maxcmdsn(cmd, conn->sess); - cmd->stat_sn = conn->stat_sn++; } else { sg_off = cmd->write_data_done / PAGE_SIZE; data_left = se_cmd->data_length - cmd->write_data_done; @@ -2259,18 +2293,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, data_len = min(data_left, rdma_write_max); wr->cur_rdma_length = data_len; - spin_lock_irqsave(&isert_conn->conn_lock, flags); - fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, - struct fast_reg_descriptor, list); 
- list_del(&fr_desc->list); - spin_unlock_irqrestore(&isert_conn->conn_lock, flags); - wr->fr_desc = fr_desc; + /* if there is a single dma entry, dma mr is sufficient */ + if (count == 1) { + ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]); + ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]); + ib_sge->lkey = isert_conn->conn_mr->lkey; + wr->fr_desc = NULL; + } else { + spin_lock_irqsave(&isert_conn->conn_lock, flags); + fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, + struct fast_reg_descriptor, list); + list_del(&fr_desc->list); + spin_unlock_irqrestore(&isert_conn->conn_lock, flags); + wr->fr_desc = fr_desc; - ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, - ib_sge, offset, data_len); - if (ret) { - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); - goto unmap_sg; + ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, + ib_sge, offset, data_len); + if (ret) { + list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); + goto unmap_sg; + } } return 0; @@ -2306,10 +2348,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) * Build isert_conn->tx_desc for iSCSI response PDU and attach */ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); - iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *) + iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr); + isert_init_send_wr(isert_conn, isert_cmd, + &isert_cmd->tx_desc.send_wr, true); atomic_inc(&isert_conn->post_send_buf_count); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 631f2090f0b8..691f90ff2d83 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -43,6 +43,8 @@ struct iser_tx_desc { struct ib_sge tx_sg[2]; int num_sge; struct isert_cmd *isert_cmd; + struct llist_node *comp_llnode_batch; + struct llist_node comp_llnode; struct ib_send_wr send_wr; } __packed; @@ -121,6 +123,10 @@ struct isert_conn { int conn_frwr_pool_size; /* lock to protect frwr_pool */ spinlock_t conn_lock; +#define ISERT_COMP_BATCH_COUNT 8 + int conn_comp_batch; + struct llist_head conn_comp_llist; + struct mutex conn_comp_mutex; }; #define ISERT_MAX_CQ 64 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 6c923c7039a1..520a7e5a490b 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) /* XXX(hch): this is a horrible layering violation.. */ spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); - ioctx->cmd.transport_state |= CMD_T_LUN_STOP; ioctx->cmd.transport_state &= ~CMD_T_ACTIVE; spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); - - complete(&ioctx->cmd.transport_lun_stop_comp); break; case SRPT_STATE_CMD_RSP_SENT: /* @@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) * not been received in time. 
*/ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); - spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); - ioctx->cmd.transport_state |= CMD_T_LUN_STOP; - spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); break; case SRPT_STATE_MGMT_RSP_SENT: @@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, { struct se_cmd *cmd; enum srpt_command_state state; - unsigned long flags; cmd = &ioctx->cmd; state = srpt_get_cmd_state(ioctx); @@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, __func__, __LINE__, state); break; case SRPT_RDMA_WRITE_LAST: - spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); - ioctx->cmd.transport_state |= CMD_T_LUN_STOP; - spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); break; default: printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__, diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index dbd2047f1641..3ed23513d881 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -536,7 +536,8 @@ static int adp5588_probe(struct i2c_client *client, __set_bit(EV_REP, input->evbit); for (i = 0; i < input->keycodemax; i++) - __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); + if (kpad->keycode[i] <= KEY_MAX) + __set_bit(kpad->keycode[i], input->keybit); __clear_bit(KEY_RESERVED, input->keybit); if (kpad->gpimapsize) diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 67d12b3427c9..60dafd4fa692 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c @@ -992,7 +992,8 @@ static int adp5589_probe(struct i2c_client *client, __set_bit(EV_REP, input->evbit); for (i = 0; i < input->keycodemax; i++) - __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); + if (kpad->keycode[i] <= KEY_MAX) + __set_bit(kpad->keycode[i], input->keybit); __clear_bit(KEY_RESERVED, input->keybit); if (kpad->gpimapsize) diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c index fc88fb48d70d..09b91d093087 100644 --- a/drivers/input/keyboard/bf54x-keys.c +++ b/drivers/input/keyboard/bf54x-keys.c @@ -289,7 +289,8 @@ static int bfin_kpad_probe(struct platform_device *pdev) __set_bit(EV_REP, input->evbit); for (i = 0; i < input->keycodemax; i++) - __set_bit(bf54x_kpad->keycode[i] & KEY_MAX, input->keybit); + if (bf54x_kpad->keycode[i] <= KEY_MAX) + __set_bit(bf54x_kpad->keycode[i], input->keybit); __clear_bit(KEY_RESERVED, input->keybit); error = input_register_device(input); diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 0735de3a6468..1cb1da294419 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c @@ -158,7 +158,7 @@ /* ORIENT ADXL346 only */ #define ADXL346_2D_VALID (1 << 6) -#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) +#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4) #define ADXL346_3D_VALID (1 << 3) #define ADXL346_3D_ORIENT(x) ((x) & 0x7) #define ADXL346_2D_PORTRAIT_POS 0 /* +X */ diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c index 86b822806e95..45e0e3e55de2 100644 --- a/drivers/input/misc/hp_sdc_rtc.c +++ b/drivers/input/misc/hp_sdc_rtc.c @@ -180,7 +180,10 @@ static int64_t hp_sdc_rtc_read_i8042timer (uint8_t loadcmd, int numreg) if (WARN_ON(down_interruptible(&i8042tregs))) return -1; - if (hp_sdc_enqueue_transaction(&t)) return -1; + if (hp_sdc_enqueue_transaction(&t)) { + up(&i8042tregs); + return 
-1; + } /* Sleep until results come back. */ if (WARN_ON(down_interruptible(&i8042tregs))) diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c index e37392976fdd..0deca5a3c87f 100644 --- a/drivers/input/misc/pcf8574_keypad.c +++ b/drivers/input/misc/pcf8574_keypad.c @@ -113,9 +113,12 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i idev->keycodemax = ARRAY_SIZE(lp->btncode); for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) { - lp->btncode[i] = pcf8574_kp_btncode[i]; - __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit); + if (lp->btncode[i] <= KEY_MAX) { + lp->btncode[i] = pcf8574_kp_btncode[i]; + __set_bit(lp->btncode[i], idev->keybit); + } } + __clear_bit(KEY_RESERVED, idev->keybit); sprintf(lp->name, DRV_NAME); sprintf(lp->phys, "kp_data/input0"); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index ca7a26f1dce8..5cf62e315218 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -70,6 +70,25 @@ static const struct alps_nibble_commands alps_v4_nibble_commands[] = { { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ }; +static const struct alps_nibble_commands alps_v6_nibble_commands[] = { + { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */ + { PSMOUSE_CMD_SETRATE, 0x0a }, /* 1 */ + { PSMOUSE_CMD_SETRATE, 0x14 }, /* 2 */ + { PSMOUSE_CMD_SETRATE, 0x28 }, /* 3 */ + { PSMOUSE_CMD_SETRATE, 0x3c }, /* 4 */ + { PSMOUSE_CMD_SETRATE, 0x50 }, /* 5 */ + { PSMOUSE_CMD_SETRATE, 0x64 }, /* 6 */ + { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 7 */ + { PSMOUSE_CMD_GETID, 0x00 }, /* 8 */ + { PSMOUSE_CMD_GETINFO, 0x00 }, /* 9 */ + { PSMOUSE_CMD_SETRES, 0x00 }, /* a */ + { PSMOUSE_CMD_SETRES, 0x01 }, /* b */ + { PSMOUSE_CMD_SETRES, 0x02 }, /* c */ + { PSMOUSE_CMD_SETRES, 0x03 }, /* d */ + { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* e */ + { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ +}; + #define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ #define ALPS_PASS 0x04 /* device has a pass-through port */ @@ -103,6 +122,7 @@ static const struct alps_model_info alps_model_data[] = { /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, + { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ @@ -645,6 +665,76 @@ static void alps_process_packet_v3(struct psmouse *psmouse) alps_process_touchpad_packet_v3(psmouse); } +static void alps_process_packet_v6(struct psmouse *psmouse) +{ + struct alps_data *priv = psmouse->private; + unsigned char *packet = psmouse->packet; + struct input_dev *dev = psmouse->dev; + struct input_dev *dev2 = priv->dev2; + int x, y, z, left, right, middle; + + /* + * We can use Byte5 to distinguish if the packet is from Touchpad + * or Trackpoint. 
+ * Touchpad: 0 - 0x7E + * Trackpoint: 0x7F + */ + if (packet[5] == 0x7F) { + /* It should be a DualPoint when received Trackpoint packet */ + if (!(priv->flags & ALPS_DUALPOINT)) + return; + + /* Trackpoint packet */ + x = packet[1] | ((packet[3] & 0x20) << 2); + y = packet[2] | ((packet[3] & 0x40) << 1); + z = packet[4]; + left = packet[3] & 0x01; + right = packet[3] & 0x02; + middle = packet[3] & 0x04; + + /* To prevent the cursor jump when finger lifted */ + if (x == 0x7F && y == 0x7F && z == 0x7F) + x = y = z = 0; + + /* Divide 4 since trackpoint's speed is too fast */ + input_report_rel(dev2, REL_X, (char)x / 4); + input_report_rel(dev2, REL_Y, -((char)y / 4)); + + input_report_key(dev2, BTN_LEFT, left); + input_report_key(dev2, BTN_RIGHT, right); + input_report_key(dev2, BTN_MIDDLE, middle); + + input_sync(dev2); + return; + } + + /* Touchpad packet */ + x = packet[1] | ((packet[3] & 0x78) << 4); + y = packet[2] | ((packet[4] & 0x78) << 4); + z = packet[5]; + left = packet[3] & 0x01; + right = packet[3] & 0x02; + + if (z > 30) + input_report_key(dev, BTN_TOUCH, 1); + if (z < 25) + input_report_key(dev, BTN_TOUCH, 0); + + if (z > 0) { + input_report_abs(dev, ABS_X, x); + input_report_abs(dev, ABS_Y, y); + } + + input_report_abs(dev, ABS_PRESSURE, z); + input_report_key(dev, BTN_TOOL_FINGER, z > 0); + + /* v6 touchpad does not have middle button */ + input_report_key(dev, BTN_LEFT, left); + input_report_key(dev, BTN_RIGHT, right); + + input_sync(dev); +} + static void alps_process_packet_v4(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; @@ -897,7 +987,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) } /* Bytes 2 - pktsize should have 0 in the highest bit */ - if (priv->proto_version != ALPS_PROTO_V5 && + if ((priv->proto_version < ALPS_PROTO_V5) && psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", @@ -1085,6 +1175,80 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse) return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); } +static int alps_monitor_mode_send_word(struct psmouse *psmouse, u16 word) +{ + int i, nibble; + + /* + * b0-b11 are valid bits, send sequence is inverse. + * e.g. 
when word = 0x0123, nibble send sequence is 3, 2, 1 + */ + for (i = 0; i <= 8; i += 4) { + nibble = (word >> i) & 0xf; + if (alps_command_mode_send_nibble(psmouse, nibble)) + return -1; + } + + return 0; +} + +static int alps_monitor_mode_write_reg(struct psmouse *psmouse, + u16 addr, u16 value) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + + /* 0x0A0 is the command to write the word */ + if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE) || + alps_monitor_mode_send_word(psmouse, 0x0A0) || + alps_monitor_mode_send_word(psmouse, addr) || + alps_monitor_mode_send_word(psmouse, value) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE)) + return -1; + + return 0; +} + +static int alps_monitor_mode(struct psmouse *psmouse, bool enable) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + + if (enable) { + /* EC E9 F5 F5 E7 E6 E7 E9 to enter monitor mode */ + if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO)) + return -1; + } else { + /* EC to exit monitor mode */ + if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP)) + return -1; + } + + return 0; +} + +static int alps_absolute_mode_v6(struct psmouse *psmouse) +{ + u16 reg_val = 0x181; + int ret = -1; + + /* enter monitor mode, to write the register */ + if (alps_monitor_mode(psmouse, true)) + return -1; + + ret = alps_monitor_mode_write_reg(psmouse, 0x000, reg_val); + + if (alps_monitor_mode(psmouse, false)) + ret = -1; + + return ret; +} + static int alps_get_status(struct psmouse *psmouse, char *param) { /* Get status: 0xF5 0xF5 0xF5 0xE9 */ @@ -1189,6 +1353,32 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse) return 0; } +static int alps_hw_init_v6(struct psmouse *psmouse) +{ + unsigned char param[2] = {0xC8, 0x14}; + + /* Enter passthrough mode to let trackpoint enter 6byte raw mode */ + if (alps_passthrough_mode_v2(psmouse, true)) + return -1; + + if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || + ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || + ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || + ps2_command(&psmouse->ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || + ps2_command(&psmouse->ps2dev, &param[1], PSMOUSE_CMD_SETRATE)) + return -1; + + if (alps_passthrough_mode_v2(psmouse, false)) + return -1; + + if (alps_absolute_mode_v6(psmouse)) { + psmouse_err(psmouse, "Failed to enable absolute mode\n"); + return -1; + } + + return 0; +} + /* * Enable or disable passthrough mode to the trackstick. 
*/ @@ -1553,6 +1743,8 @@ static void alps_set_defaults(struct alps_data *priv) priv->hw_init = alps_hw_init_v1_v2; priv->process_packet = alps_process_packet_v1_v2; priv->set_abs_params = alps_set_abs_params_st; + priv->x_max = 1023; + priv->y_max = 767; break; case ALPS_PROTO_V3: priv->hw_init = alps_hw_init_v3; @@ -1584,6 +1776,14 @@ static void alps_set_defaults(struct alps_data *priv) priv->x_bits = 23; priv->y_bits = 12; break; + case ALPS_PROTO_V6: + priv->hw_init = alps_hw_init_v6; + priv->process_packet = alps_process_packet_v6; + priv->set_abs_params = alps_set_abs_params_st; + priv->nibble_commands = alps_v6_nibble_commands; + priv->x_max = 2047; + priv->y_max = 1535; + break; } } @@ -1705,8 +1905,8 @@ static void alps_disconnect(struct psmouse *psmouse) static void alps_set_abs_params_st(struct alps_data *priv, struct input_dev *dev1) { - input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0); - input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0); + input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0); + input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0); } static void alps_set_abs_params_mt(struct alps_data *priv, diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index eee59853b9ce..704f0f924307 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -17,6 +17,7 @@ #define ALPS_PROTO_V3 3 #define ALPS_PROTO_V4 4 #define ALPS_PROTO_V5 5 +#define ALPS_PROTO_V6 6 /** * struct alps_model_info - touchpad ID table diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 8551dcaf24db..597e9b8fc18d 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1313,6 +1313,7 @@ static int elantech_set_properties(struct elantech_data *etd) break; case 6: case 7: + case 8: etd->hw_version = 4; break; default: diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 98707fb2cb5d..8f4c4ab04bc2 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type); static DEVICE_ATTR_RO(proto); static DEVICE_ATTR_RO(id); static DEVICE_ATTR_RO(extra); -static DEVICE_ATTR_RO(modalias); -static DEVICE_ATTR_WO(drvctl); -static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); -static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); static struct attribute *serio_device_id_attrs[] = { &dev_attr_type.attr, &dev_attr_proto.attr, &dev_attr_id.attr, &dev_attr_extra.attr, + NULL +}; + +static struct attribute_group serio_id_attr_group = { + .name = "id", + .attrs = serio_device_id_attrs, +}; + +static DEVICE_ATTR_RO(modalias); +static DEVICE_ATTR_WO(drvctl); +static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); +static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); + +static struct attribute *serio_device_attrs[] = { &dev_attr_modalias.attr, &dev_attr_description.attr, &dev_attr_drvctl.attr, @@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = { NULL }; -static struct attribute_group serio_id_attr_group = { - .name = "id", - .attrs = serio_device_id_attrs, +static struct attribute_group serio_device_attr_group = { + .attrs = serio_device_attrs, }; static const struct attribute_group *serio_device_attr_groups[] = { &serio_id_attr_group, + &serio_device_attr_group, NULL }; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 00d1e547b211..961d58d32647 100644 --- 
a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -906,6 +906,17 @@ config TOUCHSCREEN_STMPE To compile this driver as a module, choose M here: the module will be called stmpe-ts. +config TOUCHSCREEN_SUR40 + tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen" + depends on USB + select INPUT_POLLDEV + help + Say Y here if you want support for the Samsung SUR40 touchscreen + (also known as Microsoft Surface 2.0 or Microsoft PixelSense). + + To compile this driver as a module, choose M here: the + module will be called sur40. + config TOUCHSCREEN_TPS6507X tristate "TPS6507x based touchscreens" depends on I2C diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 7587883b8d38..62801f213346 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o +obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c index 268a35e55d7f..279c0e42b8a7 100644 --- a/drivers/input/touchscreen/atmel-wm97xx.c +++ b/drivers/input/touchscreen/atmel-wm97xx.c @@ -391,7 +391,7 @@ static int __exit atmel_wm97xx_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int atmel_wm97xx_suspend(struct *dev) +static int atmel_wm97xx_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct atmel_wm97xx *atmel_wm97xx = platform_get_drvdata(pdev); diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c index 42d830efa316..a035a390f8e2 100644 --- a/drivers/input/touchscreen/cyttsp4_core.c +++ b/drivers/input/touchscreen/cyttsp4_core.c @@ -1246,8 +1246,7 @@ static void cyttsp4_watchdog_timer(unsigned long handle) dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__); - if (!work_pending(&cd->watchdog_work)) - schedule_work(&cd->watchdog_work); + schedule_work(&cd->watchdog_work); return; } diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c new file mode 100644 index 000000000000..f1cb05148b46 --- /dev/null +++ b/drivers/input/touchscreen/sur40.c @@ -0,0 +1,466 @@ +/* + * Surface2.0/SUR40/PixelSense input driver + * + * Copyright (c) 2013 by Florian 'floe' Echtler <floe@butterbrot.org> + * + * Derived from the USB Skeleton driver 1.1, + * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com) + * + * and from the Apple USB BCM5974 multitouch driver, + * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se) + * + * and from the generic hid-multitouch driver, + * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/uaccess.h> +#include <linux/usb.h> +#include <linux/printk.h> +#include <linux/input-polldev.h> +#include <linux/input/mt.h> +#include <linux/usb/input.h> + +/* read 512 bytes from endpoint 0x86 -> get header + blobs */ +struct sur40_header { + + __le16 type; /* always 0x0001 */ + __le16 count; /* count of blobs (if 0: continue prev. packet) */ + + __le32 packet_id; /* unique ID for all packets in one frame */ + + __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */ + __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ + +} __packed; + +struct sur40_blob { + + __le16 blob_id; + + u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ + u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */ + + __le16 bb_pos_x; /* upper left corner of bounding box */ + __le16 bb_pos_y; + + __le16 bb_size_x; /* size of bounding box */ + __le16 bb_size_y; + + __le16 pos_x; /* finger tip position */ + __le16 pos_y; + + __le16 ctr_x; /* centroid position */ + __le16 ctr_y; + + __le16 axis_x; /* somehow related to major/minor axis, mostly: */ + __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */ + + __le32 angle; /* orientation in radians relative to x axis - + actually an IEEE754 float, don't use in kernel */ + + __le32 area; /* size in pixels/pressure (?) */ + + u8 padding[32]; + +} __packed; + +/* combined header/blob data */ +struct sur40_data { + struct sur40_header header; + struct sur40_blob blobs[]; +} __packed; + + +/* version information */ +#define DRIVER_SHORT "sur40" +#define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>" +#define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver" + +/* vendor and device IDs */ +#define ID_MICROSOFT 0x045e +#define ID_SUR40 0x0775 + +/* sensor resolution */ +#define SENSOR_RES_X 1920 +#define SENSOR_RES_Y 1080 + +/* touch data endpoint */ +#define TOUCH_ENDPOINT 0x86 + +/* polling interval (ms) */ +#define POLL_INTERVAL 10 + +/* maximum number of contacts FIXME: this is a guess? */ +#define MAX_CONTACTS 64 + +/* control commands */ +#define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ +#define SUR40_UNKNOWN1 0xb3 /* 5 bytes */ +#define SUR40_UNKNOWN2 0xc1 /* 24 bytes */ + +#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ +#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ + +/* + * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT + * here by mistake which is very likely to have corrupted the firmware EEPROM + * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug. 
+ * Should you ever run into a similar problem, the background story to this + * incident and instructions on how to fix the corrupted EEPROM are available + * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html +*/ + +struct sur40_state { + + struct usb_device *usbdev; + struct device *dev; + struct input_polled_dev *input; + + struct sur40_data *bulk_in_buffer; + size_t bulk_in_size; + u8 bulk_in_epaddr; + + char phys[64]; +}; + +static int sur40_command(struct sur40_state *dev, + u8 command, u16 index, void *buffer, u16 size) +{ + return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0), + command, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, + 0x00, index, buffer, size, 1000); +} + +/* Initialization routine, called from sur40_open */ +static int sur40_init(struct sur40_state *dev) +{ + int result; + u8 buffer[24]; + + /* stupidly replay the original MS driver init sequence */ + result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12); + if (result < 0) + return result; + + result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12); + if (result < 0) + return result; + + result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12); + if (result < 0) + return result; + + result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24); + if (result < 0) + return result; + + result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5); + if (result < 0) + return result; + + result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12); + + /* + * Discard the result buffer - no known data inside except + * some version strings, maybe extract these sometime... + */ + + return result; +} + +/* + * Callback routines from input_polled_dev + */ + +/* Enable the device, polling will now start. */ +static void sur40_open(struct input_polled_dev *polldev) +{ + struct sur40_state *sur40 = polldev->private; + + dev_dbg(sur40->dev, "open\n"); + sur40_init(sur40); +} + +/* Disable device, polling has stopped. */ +static void sur40_close(struct input_polled_dev *polldev) +{ + struct sur40_state *sur40 = polldev->private; + + dev_dbg(sur40->dev, "close\n"); + /* + * There is no known way to stop the device, so we simply + * stop polling. + */ +} + +/* + * This function is called when a whole contact has been processed, + * so that it can assign it to a slot and store the data there. 
+ */ +static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) +{ + int wide, major, minor; + + int bb_size_x = le16_to_cpu(blob->bb_size_x); + int bb_size_y = le16_to_cpu(blob->bb_size_y); + + int pos_x = le16_to_cpu(blob->pos_x); + int pos_y = le16_to_cpu(blob->pos_y); + + int ctr_x = le16_to_cpu(blob->ctr_x); + int ctr_y = le16_to_cpu(blob->ctr_y); + + int slotnum = input_mt_get_slot_by_key(input, blob->blob_id); + if (slotnum < 0 || slotnum >= MAX_CONTACTS) + return; + + input_mt_slot(input, slotnum); + input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); + wide = (bb_size_x > bb_size_y); + major = max(bb_size_x, bb_size_y); + minor = min(bb_size_x, bb_size_y); + + input_report_abs(input, ABS_MT_POSITION_X, pos_x); + input_report_abs(input, ABS_MT_POSITION_Y, pos_y); + input_report_abs(input, ABS_MT_TOOL_X, ctr_x); + input_report_abs(input, ABS_MT_TOOL_Y, ctr_y); + + /* TODO: use a better orientation measure */ + input_report_abs(input, ABS_MT_ORIENTATION, wide); + input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); + input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); +} + +/* core function: poll for new input data */ +static void sur40_poll(struct input_polled_dev *polldev) +{ + + struct sur40_state *sur40 = polldev->private; + struct input_dev *input = polldev->input; + int result, bulk_read, need_blobs, packet_blobs, i; + u32 uninitialized_var(packet_id); + + struct sur40_header *header = &sur40->bulk_in_buffer->header; + struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0]; + + dev_dbg(sur40->dev, "poll\n"); + + need_blobs = -1; + + do { + + /* perform a blocking bulk read to get data from the device */ + result = usb_bulk_msg(sur40->usbdev, + usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr), + sur40->bulk_in_buffer, sur40->bulk_in_size, + &bulk_read, 1000); + + dev_dbg(sur40->dev, "received %d bytes\n", bulk_read); + + if (result < 0) { + dev_err(sur40->dev, "error in usb_bulk_read\n"); + return; + } + + result = bulk_read - sizeof(struct sur40_header); + + if (result % sizeof(struct sur40_blob) != 0) { + dev_err(sur40->dev, "transfer size mismatch\n"); + return; + } + + /* first packet? */ + if (need_blobs == -1) { + need_blobs = le16_to_cpu(header->count); + dev_dbg(sur40->dev, "need %d blobs\n", need_blobs); + packet_id = le32_to_cpu(header->packet_id); + } + + /* + * Sanity check. when video data is also being retrieved, the + * packet ID will usually increase in the middle of a series + * instead of at the end. + */ + if (packet_id != header->packet_id) + dev_warn(sur40->dev, "packet ID mismatch\n"); + + packet_blobs = result / sizeof(struct sur40_blob); + dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); + + /* packets always contain at least 4 blobs, even if empty */ + if (packet_blobs > need_blobs) + packet_blobs = need_blobs; + + for (i = 0; i < packet_blobs; i++) { + need_blobs--; + dev_dbg(sur40->dev, "processing blob\n"); + sur40_report_blob(&(inblob[i]), input); + } + + } while (need_blobs > 0); + + input_mt_sync_frame(input); + input_sync(input); +} + +/* Initialize input device parameters. 
*/ +static void sur40_input_setup(struct input_dev *input_dev) +{ + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(EV_ABS, input_dev->evbit); + + input_set_abs_params(input_dev, ABS_MT_POSITION_X, + 0, SENSOR_RES_X, 0, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_Y, + 0, SENSOR_RES_Y, 0, 0); + + input_set_abs_params(input_dev, ABS_MT_TOOL_X, + 0, SENSOR_RES_X, 0, 0); + input_set_abs_params(input_dev, ABS_MT_TOOL_Y, + 0, SENSOR_RES_Y, 0, 0); + + /* max value unknown, but major/minor axis + * can never be larger than screen */ + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, + 0, SENSOR_RES_X, 0, 0); + input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, + 0, SENSOR_RES_Y, 0, 0); + + input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); + + input_mt_init_slots(input_dev, MAX_CONTACTS, + INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); +} + +/* Check candidate USB interface. */ +static int sur40_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct usb_device *usbdev = interface_to_usbdev(interface); + struct sur40_state *sur40; + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + struct input_polled_dev *poll_dev; + int error; + + /* Check if we really have the right interface. */ + iface_desc = &interface->altsetting[0]; + if (iface_desc->desc.bInterfaceClass != 0xFF) + return -ENODEV; + + /* Use endpoint #4 (0x86). */ + endpoint = &iface_desc->endpoint[4].desc; + if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) + return -ENODEV; + + /* Allocate memory for our device state and initialize it. */ + sur40 = kzalloc(sizeof(struct sur40_state), GFP_KERNEL); + if (!sur40) + return -ENOMEM; + + poll_dev = input_allocate_polled_device(); + if (!poll_dev) { + error = -ENOMEM; + goto err_free_dev; + } + + /* Set up polled input device control structure */ + poll_dev->private = sur40; + poll_dev->poll_interval = POLL_INTERVAL; + poll_dev->open = sur40_open; + poll_dev->poll = sur40_poll; + poll_dev->close = sur40_close; + + /* Set up regular input device structure */ + sur40_input_setup(poll_dev->input); + + poll_dev->input->name = "Samsung SUR40"; + usb_to_input_id(usbdev, &poll_dev->input->id); + usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys)); + strlcat(sur40->phys, "/input0", sizeof(sur40->phys)); + poll_dev->input->phys = sur40->phys; + poll_dev->input->dev.parent = &interface->dev; + + sur40->usbdev = usbdev; + sur40->dev = &interface->dev; + sur40->input = poll_dev; + + /* use the bulk-in endpoint tested above */ + sur40->bulk_in_size = usb_endpoint_maxp(endpoint); + sur40->bulk_in_epaddr = endpoint->bEndpointAddress; + sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL); + if (!sur40->bulk_in_buffer) { + dev_err(&interface->dev, "Unable to allocate input buffer."); + error = -ENOMEM; + goto err_free_polldev; + } + + error = input_register_polled_device(poll_dev); + if (error) { + dev_err(&interface->dev, + "Unable to register polled input device."); + goto err_free_buffer; + } + + /* we can register the device now, as it is ready */ + usb_set_intfdata(interface, sur40); + dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC); + + return 0; + +err_free_buffer: + kfree(sur40->bulk_in_buffer); +err_free_polldev: + input_free_polled_device(sur40->input); +err_free_dev: + kfree(sur40); + + return error; +} + +/* Unregister device & clean up. 
*/ +static void sur40_disconnect(struct usb_interface *interface) +{ + struct sur40_state *sur40 = usb_get_intfdata(interface); + + input_unregister_polled_device(sur40->input); + input_free_polled_device(sur40->input); + kfree(sur40->bulk_in_buffer); + kfree(sur40); + + usb_set_intfdata(interface, NULL); + dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC); +} + +static const struct usb_device_id sur40_table[] = { + { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */ + { } /* terminating null entry */ +}; +MODULE_DEVICE_TABLE(usb, sur40_table); + +/* USB-specific object needed to register this driver with the USB subsystem. */ +static struct usb_driver sur40_driver = { + .name = DRIVER_SHORT, + .probe = sur40_probe, + .disconnect = sur40_disconnect, + .id_table = sur40_table, +}; + +module_usb_driver(sur40_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index ae4b6b903629..5f87bed05467 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -106,6 +106,7 @@ struct usbtouch_device_info { struct usbtouch_usb { unsigned char *data; dma_addr_t data_dma; + int data_size; unsigned char *buffer; int buf_len; struct urb *irq; @@ -1521,7 +1522,7 @@ static int usbtouch_reset_resume(struct usb_interface *intf) static void usbtouch_free_buffers(struct usb_device *udev, struct usbtouch_usb *usbtouch) { - usb_free_coherent(udev, usbtouch->type->rept_size, + usb_free_coherent(udev, usbtouch->data_size, usbtouch->data, usbtouch->data_dma); kfree(usbtouch->buffer); } @@ -1566,7 +1567,20 @@ static int usbtouch_probe(struct usb_interface *intf, if (!type->process_pkt) type->process_pkt = usbtouch_process_pkt; - usbtouch->data = usb_alloc_coherent(udev, type->rept_size, + usbtouch->data_size = type->rept_size; + if (type->get_pkt_len) { + /* + * When dealing with variable-length packets we should + * not request more than wMaxPacketSize bytes at once + * as we do not know if there is more data coming or + * we filled exactly wMaxPacketSize bytes and there is + * nothing else. 
+ */ + usbtouch->data_size = min(usbtouch->data_size, + usb_endpoint_maxp(endpoint)); + } + + usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size, GFP_KERNEL, &usbtouch->data_dma); if (!usbtouch->data) goto out_free; @@ -1626,12 +1640,12 @@ static int usbtouch_probe(struct usb_interface *intf, if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) usb_fill_int_urb(usbtouch->irq, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress), - usbtouch->data, type->rept_size, + usbtouch->data, usbtouch->data_size, usbtouch_irq, usbtouch, endpoint->bInterval); else usb_fill_bulk_urb(usbtouch->irq, udev, usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), - usbtouch->data, type->rept_size, + usbtouch->data, usbtouch->data_size, usbtouch_irq, usbtouch); usbtouch->irq->dev = udev; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 1abfb5684ab7..e46a88700b68 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -392,7 +392,7 @@ struct arm_smmu_domain { struct arm_smmu_cfg root_cfg; phys_addr_t output_mask; - spinlock_t lock; + struct mutex lock; }; static DEFINE_SPINLOCK(arm_smmu_devices_lock); @@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) goto out_free_domain; smmu_domain->root_cfg.pgd = pgd; - spin_lock_init(&smmu_domain->lock); + mutex_init(&smmu_domain->lock); domain->priv = smmu_domain; return 0; @@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * Sanity check the domain. We don't currently support domains * that cross between different SMMU chains. */ - spin_lock(&smmu_domain->lock); + mutex_lock(&smmu_domain->lock); if (!smmu_domain->leaf_smmu) { /* Now that we have a master, we can finalise the domain */ ret = arm_smmu_init_domain_context(domain, dev); @@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) dev_name(device_smmu->dev)); goto err_unlock; } - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); /* Looks ok, so add the device to the domain */ master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); @@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return arm_smmu_domain_add_master(smmu_domain, master); err_unlock: - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); return ret; } @@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, if (paddr & ~output_mask) return -ERANGE; - spin_lock(&smmu_domain->lock); + mutex_lock(&smmu_domain->lock); pgd += pgd_index(iova); end = iova + size; do { @@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, } while (pgd++, iova != end); out_unlock: - spin_unlock(&smmu_domain->lock); + mutex_unlock(&smmu_domain->lock); /* Ensure new page tables are visible to the hardware walker */ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) @@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int flags) { struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu = smmu_domain->leaf_smmu; - if (!smmu_domain || !smmu) + if (!smmu_domain) return -ENODEV; /* Check for silent address truncation up the SMMU chain. 
*/ @@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pgd_t *pgdp, pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; - struct arm_smmu_device *smmu = root_cfg->smmu; - spin_lock(&smmu_domain->lock); - pgd = root_cfg->pgd; - if (!pgd) - goto err_unlock; + pgdp = root_cfg->pgd; + if (!pgdp) + return 0; - pgd += pgd_index(iova); - if (pgd_none_or_clear_bad(pgd)) - goto err_unlock; + pgd = *(pgdp + pgd_index(iova)); + if (pgd_none(pgd)) + return 0; - pud = pud_offset(pgd, iova); - if (pud_none_or_clear_bad(pud)) - goto err_unlock; + pud = *pud_offset(&pgd, iova); + if (pud_none(pud)) + return 0; - pmd = pmd_offset(pud, iova); - if (pmd_none_or_clear_bad(pmd)) - goto err_unlock; + pmd = *pmd_offset(&pud, iova); + if (pmd_none(pmd)) + return 0; - pte = pmd_page_vaddr(*pmd) + pte_index(iova); + pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); if (pte_none(pte)) - goto err_unlock; - - spin_unlock(&smmu_domain->lock); - return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK); + return 0; -err_unlock: - spin_unlock(&smmu_domain->lock); - dev_warn(smmu->dev, - "invalid (corrupt?) page tables detected for iova 0x%llx\n", - (unsigned long long)iova); - return -EINVAL; + return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); } static int arm_smmu_domain_has_cap(struct iommu_domain *domain, @@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) dev_err(dev, "found only %d context interrupt(s) but %d required\n", smmu->num_context_irqs, smmu->num_context_banks); + err = -ENODEV; goto out_put_parent; } diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 9031171c141b..341c6016812d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -957,12 +957,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, if (WARN_ON(!gic->domain)) return; + if (gic_nr == 0) { #ifdef CONFIG_SMP - set_smp_cross_call(gic_raise_softirq); - register_cpu_notifier(&gic_cpu_notifier); + set_smp_cross_call(gic_raise_softirq); + register_cpu_notifier(&gic_cpu_notifier); #endif - - set_handle_irq(gic_handle_irq); + set_handle_irq(gic_handle_irq); + } gic_chip.flags |= gic_arch_extn.flags; gic_dist_init(gic); diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index baf2686aa8eb..02125e6a9109 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) spin_unlock_irqrestore(&card->isdnloop_lock, flags); return -ENOMEM; } - for (i = 0; i < 3; i++) - strcpy(card->s0num[i], sdef.num[i]); + for (i = 0; i < 3; i++) { + strlcpy(card->s0num[i], sdef.num[i], + sizeof(card->s0num[0])); + } break; case ISDN_PTYPE_1TR6: if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95", @@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) spin_unlock_irqrestore(&card->isdnloop_lock, flags); return -ENOMEM; } - strcpy(card->s0num[0], sdef.num[0]); + strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0])); card->s0num[1][0] = '\0'; card->s0num[2][0] = '\0'; break; diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index e47dcb9d1e91..5cefb479c707 100644 --- a/drivers/isdn/mISDN/socket.c 
+++ b/drivers/isdn/mISDN/socket.c @@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, { struct sk_buff *skb; struct sock *sk = sock->sk; - struct sockaddr_mISDN *maddr; int copied, err; @@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (!skb) return err; - if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { - msg->msg_namelen = sizeof(struct sockaddr_mISDN); - maddr = (struct sockaddr_mISDN *)msg->msg_name; + if (msg->msg_name) { + struct sockaddr_mISDN *maddr = msg->msg_name; + maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || @@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } - } else { - if (msg->msg_namelen) - printk(KERN_WARNING "%s: too small namelen %d\n", - __func__, msg->msg_namelen); - msg->msg_namelen = 0; + msg->msg_namelen = sizeof(*maddr); } copied = skb->len + MISDN_HEADER_LEN; diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 2848171b8576..b31d8e99c419 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -82,22 +82,12 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds) (sizeof(struct led_pwm_data) * num_leds); } -static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) +static int led_pwm_create_of(struct platform_device *pdev, + struct led_pwm_priv *priv) { struct device_node *node = pdev->dev.of_node; struct device_node *child; - struct led_pwm_priv *priv; - int count, ret; - - /* count LEDs in this device, so we know how much to allocate */ - count = of_get_child_count(node); - if (!count) - return NULL; - - priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count), - GFP_KERNEL); - if (!priv) - return NULL; + int ret; for_each_child_of_node(node, child) { struct led_pwm_data *led_dat = &priv->leds[priv->num_leds]; @@ -109,6 +99,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) if (IS_ERR(led_dat->pwm)) { dev_err(&pdev->dev, "unable to request PWM for %s\n", led_dat->cdev.name); + ret = PTR_ERR(led_dat->pwm); goto err; } /* Get the period from PWM core when n*/ @@ -137,28 +128,36 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) priv->num_leds++; } - return priv; + return 0; err: while (priv->num_leds--) led_classdev_unregister(&priv->leds[priv->num_leds].cdev); - return NULL; + return ret; } static int led_pwm_probe(struct platform_device *pdev) { struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev); struct led_pwm_priv *priv; - int i, ret = 0; + int count, i; + int ret = 0; + + if (pdata) + count = pdata->num_leds; + else + count = of_get_child_count(pdev->dev.of_node); + + if (!count) + return -EINVAL; - if (pdata && pdata->num_leds) { - priv = devm_kzalloc(&pdev->dev, - sizeof_pwm_leds_priv(pdata->num_leds), - GFP_KERNEL); - if (!priv) - return -ENOMEM; + priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count), + GFP_KERNEL); + if (!priv) + return -ENOMEM; - for (i = 0; i < pdata->num_leds; i++) { + if (pdata) { + for (i = 0; i < count; i++) { struct led_pwm *cur_led = &pdata->leds[i]; struct led_pwm_data *led_dat = &priv->leds[i]; @@ -188,11 +187,11 @@ static int led_pwm_probe(struct platform_device *pdev) if (ret < 0) goto err; } - priv->num_leds = pdata->num_leds; + priv->num_leds = count; } else { - priv = led_pwm_create_of(pdev); - if (!priv) - return -ENODEV; + ret = led_pwm_create_of(pdev, 
priv); + if (ret) + return ret; } platform_set_drvdata(pdev, priv); diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile index 6753b65f8ede..d2f0120bc878 100644 --- a/drivers/macintosh/Makefile +++ b/drivers/macintosh/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_WINDFARM_RM31) += windfarm_fcu_controls.o \ windfarm_ad7417_sensor.o \ windfarm_lm75_sensor.o \ windfarm_lm87_sensor.o \ + windfarm_max6690_sensor.o \ windfarm_pid.o \ windfarm_cpufreq_clamp.o \ windfarm_rm31.o diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 173cbb20d104..54bdd923316f 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void) { __u64 mem; + dm_bufio_allocated_kmem_cache = 0; + dm_bufio_allocated_get_free_pages = 0; + dm_bufio_allocated_vmalloc = 0; + dm_bufio_current_allocated = 0; + memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 416b7b752a6e..64780ad73bb0 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, int r = 0; bool updated = updated_this_tick(mq, e); - requeue_and_update_tick(mq, e); - if ((!discarded_oblock && updated) || - !should_promote(mq, e, discarded_oblock, data_dir)) + !should_promote(mq, e, discarded_oblock, data_dir)) { + requeue_and_update_tick(mq, e); result->op = POLICY_MISS; - else if (!can_migrate) + + } else if (!can_migrate) r = -EWOULDBLOCK; - else + + else { + requeue_and_update_tick(mq, e); r = pre_cache_to_cache(mq, e, result); + } return r; } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9efcf1059b99..1b1469ebe5cb 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) { int r; - r = dm_cache_resize(cache->cmd, cache->cache_size); + r = dm_cache_resize(cache->cmd, new_size); if (r) { DMERR("could not resize cache metadata"); return r; diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 496d5f3646a5..2f91d6d4a2cc 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -20,6 +20,7 @@ struct delay_c { struct timer_list delay_timer; struct mutex timer_lock; + struct workqueue_struct *kdelayd_wq; struct work_struct flush_expired_bios; struct list_head delayed_bios; atomic_t may_delay; @@ -45,14 +46,13 @@ struct dm_delay_info { static DEFINE_MUTEX(delayed_bios_lock); -static struct workqueue_struct *kdelayd_wq; static struct kmem_cache *delayed_cache; static void handle_delayed_timer(unsigned long data) { struct delay_c *dc = (struct delay_c *)data; - queue_work(kdelayd_wq, &dc->flush_expired_bios); + queue_work(dc->kdelayd_wq, &dc->flush_expired_bios); } static void queue_timeout(struct delay_c *dc, unsigned long expires) @@ -191,6 +191,12 @@ out: goto bad_dev_write; } + dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); + if (!dc->kdelayd_wq) { + DMERR("Couldn't start kdelayd"); + goto bad_queue; + } + setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); @@ -203,6 +209,8 @@ out: ti->private = dc; return 0; +bad_queue: + mempool_destroy(dc->delayed_pool); bad_dev_write: if (dc->dev_write) dm_put_device(ti, dc->dev_write); @@ -217,7 +225,7 @@ static void delay_dtr(struct 
dm_target *ti) { struct delay_c *dc = ti->private; - flush_workqueue(kdelayd_wq); + destroy_workqueue(dc->kdelayd_wq); dm_put_device(ti, dc->dev_read); @@ -350,12 +358,6 @@ static int __init dm_delay_init(void) { int r = -ENOMEM; - kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); - if (!kdelayd_wq) { - DMERR("Couldn't start kdelayd"); - goto bad_queue; - } - delayed_cache = KMEM_CACHE(dm_delay_info, 0); if (!delayed_cache) { DMERR("Couldn't create delayed bio cache."); @@ -373,8 +375,6 @@ static int __init dm_delay_init(void) bad_register: kmem_cache_destroy(delayed_cache); bad_memcache: - destroy_workqueue(kdelayd_wq); -bad_queue: return r; } @@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void) { dm_unregister_target(&delay_target); kmem_cache_destroy(delayed_cache); - destroy_workqueue(kdelayd_wq); } /* Module hooks */ diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index aec57d76db5d..944690bafd93 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -66,6 +66,18 @@ struct dm_snapshot { atomic_t pending_exceptions_count; + /* Protected by "lock" */ + sector_t exception_start_sequence; + + /* Protected by kcopyd single-threaded callback */ + sector_t exception_complete_sequence; + + /* + * A list of pending exceptions that completed out of order. + * Protected by kcopyd single-threaded callback. + */ + struct list_head out_of_order_list; + mempool_t *pending_pool; struct dm_exception_table pending; @@ -173,6 +185,14 @@ struct dm_snap_pending_exception { */ int started; + /* There was copying error. */ + int copy_error; + + /* A sequence number, it is used for in-order completion. */ + sector_t exception_sequence; + + struct list_head out_of_order_entry; + /* * For writing a complete chunk, bypassing the copy. */ @@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->valid = 1; s->active = 0; atomic_set(&s->pending_exceptions_count, 0); + s->exception_start_sequence = 0; + s->exception_complete_sequence = 0; + INIT_LIST_HEAD(&s->out_of_order_list); init_rwsem(&s->lock); INIT_LIST_HEAD(&s->list); spin_lock_init(&s->pe_lock); @@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success) pending_complete(pe, success); } +static void complete_exception(struct dm_snap_pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + + if (unlikely(pe->copy_error)) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store->type->commit_exception(s->store, &pe->e, + commit_callback, pe); +} + /* * Called when the copy I/O has finished. kcopyd actually runs * this code so don't block. 
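/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The dm-snap hunks add a per-snapshot sequence number so pending
 * exceptions are completed in submission order even though kcopyd may
 * finish the copies out of order: in-order completions are delivered
 * immediately, early arrivals are parked on a sorted out_of_order_list
 * and drained once the expected sequence catches up. The standalone C
 * sketch below uses hypothetical names and no kernel APIs; it only
 * shows the same bookkeeping shape under those assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

struct completion_item {
	unsigned long seq;              /* submission order */
	struct completion_item *next;   /* link in the sorted pending list */
};

struct sequencer {
	unsigned long next_to_complete;        /* like exception_complete_sequence */
	struct completion_item *out_of_order;  /* sorted by seq, ascending */
};

static void do_complete(struct completion_item *it)
{
	printf("completing item %lu\n", it->seq);
	free(it);
}

/* Called whenever an item finishes; delivers completions strictly in order. */
static void on_item_done(struct sequencer *s, struct completion_item *it)
{
	if (it->seq != s->next_to_complete) {
		/* Arrived early: insert into the sorted pending list. */
		struct completion_item **pp = &s->out_of_order;

		while (*pp && (*pp)->seq < it->seq)
			pp = &(*pp)->next;
		it->next = *pp;
		*pp = it;
		return;
	}

	/* In-order arrival: complete it, then drain any now-ready items. */
	s->next_to_complete++;
	do_complete(it);
	while (s->out_of_order && s->out_of_order->seq == s->next_to_complete) {
		struct completion_item *head = s->out_of_order;

		s->out_of_order = head->next;
		s->next_to_complete++;
		do_complete(head);
	}
}

int main(void)
{
	struct sequencer s = { 0, NULL };
	unsigned long finish_order[] = { 2, 0, 1, 4, 3 }; /* completions arrive out of order */
	int i;

	for (i = 0; i < 5; i++) {
		struct completion_item *it = malloc(sizeof(*it));

		it->seq = finish_order[i];
		it->next = NULL;
		on_item_done(&s, it);
	}
	return 0; /* prints 0..4 in order despite the shuffled arrivals */
}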
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) struct dm_snap_pending_exception *pe = context; struct dm_snapshot *s = pe->snap; - if (read_err || write_err) - pending_complete(pe, 0); + pe->copy_error = read_err || write_err; - else - /* Update the metadata if we are persistent */ - s->store->type->commit_exception(s->store, &pe->e, - commit_callback, pe); + if (pe->exception_sequence == s->exception_complete_sequence) { + s->exception_complete_sequence++; + complete_exception(pe); + + while (!list_empty(&s->out_of_order_list)) { + pe = list_entry(s->out_of_order_list.next, + struct dm_snap_pending_exception, out_of_order_entry); + if (pe->exception_sequence != s->exception_complete_sequence) + break; + s->exception_complete_sequence++; + list_del(&pe->out_of_order_entry); + complete_exception(pe); + } + } else { + struct list_head *lh; + struct dm_snap_pending_exception *pe2; + + list_for_each_prev(lh, &s->out_of_order_list) { + pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry); + if (pe2->exception_sequence < pe->exception_sequence) + break; + } + list_add(&pe->out_of_order_entry, lh); + } } /* @@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s, return NULL; } + pe->exception_sequence = s->exception_start_sequence++; + dm_insert_exception(&s->pending, &pe->e); return pe; @@ -2192,7 +2249,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 11, 1}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 3d404c1371ed..28a90122a5a8 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, int __init dm_statistics_init(void) { + shared_memory_amount = 0; dm_stat_need_rcu_barrier = 0; return 0; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 465f08ca62b1..3ba6a3859ce3 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode, num_targets = dm_round_up(num_targets, KEYS_PER_NODE); + if (!num_targets) { + kfree(t); + return -ENOMEM; + } + if (alloc_targets(t, num_targets)) { kfree(t); return -ENOMEM; diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 60bce435f4fa..8a30ad54bd46 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) up_write(&pmd->root_lock); } +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd) +{ + down_write(&pmd->root_lock); + pmd->read_only = false; + dm_bm_set_read_write(pmd->bm); + up_write(&pmd->root_lock); +} + int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_block_t threshold, dm_sm_threshold_fn fn, diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 845ebbe589a9..7bcc0e1d6238 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz * that nothing is changing. 
*/ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd); int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_block_t threshold, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2c0cf511ec23..ee29037ffc2e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); if (r) { - DMERR_LIMIT("dm_thin_insert_block() failed"); + DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d", + dm_device_name(pool->pool_md), r); + set_pool_mode(pool, PM_READ_ONLY); cell_error(pool, m->cell); goto out; } @@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, } } -static int commit(struct pool *pool) -{ - int r; - - r = dm_pool_commit_metadata(pool->pmd); - if (r) - DMERR_LIMIT("%s: commit failed: error = %d", - dm_device_name(pool->pool_md), r); - - return r; -} - /* * A non-zero return indicates read_only or fail_io mode. * Many callers don't care about the return value. */ -static int commit_or_fallback(struct pool *pool) +static int commit(struct pool *pool) { int r; if (get_pool_mode(pool) != PM_WRITE) return -EINVAL; - r = commit(pool); - if (r) + r = dm_pool_commit_metadata(pool->pmd); + if (r) { + DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d", + dm_device_name(pool->pool_md), r); set_pool_mode(pool, PM_READ_ONLY); + } return r; } @@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) * Try to commit to see if that will free up some * more space. */ - (void) commit_or_fallback(pool); + r = commit(pool); + if (r) + return r; r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); if (r) @@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) * table reload). 
*/ if (!free_blocks) { - DMWARN("%s: no free space available.", + DMWARN("%s: no free data space available.", dm_device_name(pool->pool_md)); spin_lock_irqsave(&pool->lock, flags); pool->no_free_space = 1; @@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) } r = dm_pool_alloc_data_block(pool->pmd, result); - if (r) + if (r) { + if (r == -ENOSPC && + !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && + !free_blocks) { + DMWARN("%s: no free metadata space available.", + dm_device_name(pool->pool_md)); + set_pool_mode(pool, PM_READ_ONLY); + } return r; + } return 0; } @@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool) if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) return; - if (commit_or_fallback(pool)) { + if (commit(pool)) { while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; @@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) case PM_FAIL: DMERR("%s: switching pool to failure mode", dm_device_name(pool->pool_md)); + dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; pool->process_prepared_mapping = process_prepared_mapping_fail; @@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode) break; case PM_WRITE: + dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard; pool->process_prepared_mapping = process_prepared_mapping; @@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) struct pool_c *pt = ti->private; /* - * We want to make sure that degraded pools are never upgraded. + * We want to make sure that a pool in PM_FAIL mode is never upgraded. */ enum pool_mode old_mode = pool->pf.mode; enum pool_mode new_mode = pt->adjusted_pf.mode; - if (old_mode > new_mode) + /* + * If we were in PM_FAIL mode, rollback of metadata failed. We're + * not going to recover without a thin_repair. So we never let the + * pool move out of the old mode. On the other hand a PM_READ_ONLY + * may have been due to a lack of metadata or data space, and may + * now work (ie. if the underlying devices have been resized). 
+ */ + if (old_mode == PM_FAIL) new_mode = old_mode; pool->ti = ti; @@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti) return r; if (need_commit1 || need_commit2) - (void) commit_or_fallback(pool); + (void) commit(pool); return 0; } @@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti) cancel_delayed_work(&pool->waker); flush_workqueue(pool->wq); - (void) commit_or_fallback(pool); + (void) commit(pool); } static int check_arg_count(unsigned argc, unsigned args_required) @@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct if (r) return r; - (void) commit_or_fallback(pool); + (void) commit(pool); r = dm_pool_reserve_metadata_snap(pool->pmd); if (r) @@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) DMWARN("Unrecognised thin pool target message received: %s", argv[0]); if (!r) - (void) commit_or_fallback(pool); + (void) commit(pool); return r; } @@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type, /* Commit to ensure statistics aren't out-of-date */ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) - (void) commit_or_fallback(pool); + (void) commit(pool); r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); if (r) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 8766eabb0014..21f4d7ff0da2 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev) static struct ctl_table_header *raid_table_header; -static ctl_table raid_table[] = { +static struct ctl_table raid_table[] = { { .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, @@ -130,7 +130,7 @@ static ctl_table raid_table[] = { { } }; -static ctl_table raid_dir_table[] = { +static struct ctl_table raid_dir_table[] = { { .procname = "raid", .maxlen = 0, @@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = { { } }; -static ctl_table raid_root_table[] = { +static struct ctl_table raid_root_table[] = { { .procname = "dev", .maxlen = 0, @@ -562,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit) goto retry; } -static inline int mddev_lock(struct mddev * mddev) +static inline int __must_check mddev_lock(struct mddev * mddev) { return mutex_lock_interruptible(&mddev->reconfig_mutex); } +/* Sometimes we need to take the lock in a situation where + * failure due to interrupts is not acceptable. 
+ */ +static inline void mddev_lock_nointr(struct mddev * mddev) +{ + mutex_lock(&mddev->reconfig_mutex); +} + static inline int mddev_is_locked(struct mddev *mddev) { return mutex_is_locked(&mddev->reconfig_mutex); @@ -768,16 +776,10 @@ void md_super_wait(struct mddev *mddev) finish_wait(&mddev->sb_wait, &wq); } -static void bi_complete(struct bio *bio, int error) -{ - complete((struct completion*)bio->bi_private); -} - int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, int rw, bool metadata_op) { struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); - struct completion event; int ret; rw |= REQ_SYNC; @@ -793,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, else bio->bi_sector = sector + rdev->data_offset; bio_add_page(bio, page, size, 0); - init_completion(&event); - bio->bi_private = &event; - bio->bi_end_io = bi_complete; - submit_bio(rw, bio); - wait_for_completion(&event); + submit_bio_wait(rw, bio); ret = test_bit(BIO_UPTODATE, &bio->bi_flags); bio_put(bio); @@ -2978,7 +2976,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) for_each_mddev(mddev, tmp) { struct md_rdev *rdev2; - mddev_lock(mddev); + mddev_lock_nointr(mddev); rdev_for_each(rdev2, mddev) if (rdev->bdev == rdev2->bdev && rdev != rdev2 && @@ -2994,7 +2992,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) break; } } - mddev_lock(my_mddev); + mddev_lock_nointr(my_mddev); if (overlap) { /* Someone else could have slipped in a size * change here, but doing so is just silly. @@ -3580,6 +3578,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; del_timer_sync(&mddev->safemode_timer); } + blk_set_stacking_limits(&mddev->queue->limits); pers->run(mddev); set_bit(MD_CHANGE_DEVS, &mddev->flags); mddev_resume(mddev); @@ -5258,7 +5257,7 @@ static void __md_stop_writes(struct mddev *mddev) void md_stop_writes(struct mddev *mddev) { - mddev_lock(mddev); + mddev_lock_nointr(mddev); __md_stop_writes(mddev); mddev_unlock(mddev); } @@ -5291,20 +5290,35 @@ EXPORT_SYMBOL_GPL(md_stop); static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) { int err = 0; + int did_freeze = 0; + + if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { + did_freeze = 1; + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* Thread might be blocked waiting for metadata update + * which will now never happen */ + wake_up_process(mddev->sync_thread->tsk); + } + mddev_unlock(mddev); + wait_event(resync_wait, mddev->sync_thread == NULL); + mddev_lock_nointr(mddev); + mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > !!bdev) { + if (atomic_read(&mddev->openers) > !!bdev || + mddev->sync_thread || + (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { printk("md: %s still in use.\n",mdname(mddev)); + if (did_freeze) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } err = -EBUSY; goto out; } - if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { - /* Someone opened the device since we flushed it - * so page cache could be dirty and it is too late - * to flush. 
So abort - */ - mutex_unlock(&mddev->open_mutex); - return -EBUSY; - } if (mddev->pers) { __md_stop_writes(mddev); @@ -5315,7 +5329,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) set_disk_ro(mddev->gendisk, 1); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); sysfs_notify_dirent_safe(mddev->sysfs_state); - err = 0; + err = 0; } out: mutex_unlock(&mddev->open_mutex); @@ -5331,20 +5345,34 @@ static int do_md_stop(struct mddev * mddev, int mode, { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; + int did_freeze = 0; + + if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { + did_freeze = 1; + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + /* Thread might be blocked waiting for metadata update + * which will now never happen */ + wake_up_process(mddev->sync_thread->tsk); + } + mddev_unlock(mddev); + wait_event(resync_wait, mddev->sync_thread == NULL); + mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); if (atomic_read(&mddev->openers) > !!bdev || - mddev->sysfs_active) { + mddev->sysfs_active || + mddev->sync_thread || + (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { printk("md: %s still in use.\n",mdname(mddev)); mutex_unlock(&mddev->open_mutex); - return -EBUSY; - } - if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) { - /* Someone opened the device since we flushed it - * so page cache could be dirty and it is too late - * to flush. So abort - */ - mutex_unlock(&mddev->open_mutex); + if (did_freeze) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } return -EBUSY; } if (mddev->pers) { @@ -6551,7 +6579,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_DEVS, &mddev->flags) && !test_bit(MD_CHANGE_PENDING, &mddev->flags)); - mddev_lock(mddev); + mddev_lock_nointr(mddev); } } else { err = -EROFS; @@ -7361,9 +7389,6 @@ void md_do_sync(struct md_thread *thread) mddev->curr_resync = 2; try_again: - if (kthread_should_stop()) - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) goto skip; for_each_mddev(mddev2, tmp) { @@ -7388,7 +7413,7 @@ void md_do_sync(struct md_thread *thread) * be caught by 'softlockup' */ prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); - if (!kthread_should_stop() && + if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev2->curr_resync >= mddev->curr_resync) { printk(KERN_INFO "md: delaying %s of %s" " until %s has finished (they" @@ -7464,7 +7489,7 @@ void md_do_sync(struct md_thread *thread) last_check = 0; if (j>2) { - printk(KERN_INFO + printk(KERN_INFO "md: resuming %s of %s from checkpoint.\n", desc, mdname(mddev)); mddev->curr_resync = j; @@ -7501,7 +7526,8 @@ void md_do_sync(struct md_thread *thread) sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } - while (j >= mddev->resync_max && !kthread_should_stop()) { + while (j >= mddev->resync_max && + !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* As this condition is controlled by user-space, * we can block indefinitely, so use '_interruptible' * to avoid triggering warnings. 
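/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The md.c hunks replace kthread_should_stop() with the MD_RECOVERY_INTR
 * flag plus an explicit wait on resync_wait: the stopper sets the flag,
 * wakes the sync thread at its existing wait points, and then sleeps
 * until the thread has really gone away. The pthreads sketch below uses
 * hypothetical names and user-space primitives in place of kernel wait
 * queues; it only mirrors that "set a flag, wake the worker, wait for it
 * to finish" shape under those assumptions.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool stop_requested;   /* plays the role of MD_RECOVERY_INTR */
static bool worker_running;   /* plays the role of mddev->sync_thread != NULL */

static void *resync_worker(void *arg)
{
	int step;

	(void)arg;
	pthread_mutex_lock(&lock);
	worker_running = true;
	for (step = 0; step < 1000 && !stop_requested; step++) {
		pthread_mutex_unlock(&lock);
		usleep(1000);                /* one unit of "resync" work */
		pthread_mutex_lock(&lock);
	}
	worker_running = false;              /* like clearing mddev->sync_thread */
	pthread_cond_broadcast(&cond);       /* like wake_up(&resync_wait) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Caller side: request interruption, then wait for the worker to exit. */
static void stop_resync(void)
{
	pthread_mutex_lock(&lock);
	stop_requested = true;               /* like set_bit(MD_RECOVERY_INTR, ...) */
	pthread_cond_broadcast(&cond);
	while (worker_running)               /* like wait_event(resync_wait, ...) */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, resync_worker, NULL);
	usleep(5000);
	stop_resync();
	pthread_join(t, NULL);
	printf("resync interrupted cleanly\n");
	return 0;
}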
@@ -7509,17 +7535,18 @@ void md_do_sync(struct md_thread *thread) flush_signals(current); /* just in case */ wait_event_interruptible(mddev->recovery_wait, mddev->resync_max > j - || kthread_should_stop()); + || test_bit(MD_RECOVERY_INTR, + &mddev->recovery)); } - if (kthread_should_stop()) - goto interrupted; + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; sectors = mddev->pers->sync_request(mddev, j, &skipped, currspeed < speed_min(mddev)); if (sectors == 0) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - goto out; + break; } if (!skipped) { /* actual IO requested */ @@ -7556,10 +7583,8 @@ void md_do_sync(struct md_thread *thread) last_mark = next; } - - if (kthread_should_stop()) - goto interrupted; - + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + break; /* * this loop exits only if either when we are slower than @@ -7582,11 +7607,12 @@ void md_do_sync(struct md_thread *thread) } } } - printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); + printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, + test_bit(MD_RECOVERY_INTR, &mddev->recovery) + ? "interrupted" : "done"); /* * this also signals 'finished resyncing' to md_stop */ - out: blk_finish_plug(&plug); wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); @@ -7640,16 +7666,6 @@ void md_do_sync(struct md_thread *thread) set_bit(MD_RECOVERY_DONE, &mddev->recovery); md_wakeup_thread(mddev->thread); return; - - interrupted: - /* - * got a signal, exit. - */ - printk(KERN_INFO - "md: md_do_sync() got signal ... exiting\n"); - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - goto out; - } EXPORT_SYMBOL_GPL(md_do_sync); @@ -7751,7 +7767,7 @@ void md_check_recovery(struct mddev *mddev) if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return; if ( ! ( - (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || + (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || test_bit(MD_RECOVERY_DONE, &mddev->recovery) || (mddev->external == 0 && mddev->safemode == 1) || @@ -7894,6 +7910,7 @@ void md_reap_sync_thread(struct mddev *mddev) /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); + wake_up(&resync_wait); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c index af96e24ec328..1d75b1dc1e2e 100644 --- a/drivers/md/persistent-data/dm-array.c +++ b/drivers/md/persistent-data/dm-array.c @@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root, * The shadow op will often be a noop. Only insert if it really * copied data. */ - if (dm_block_location(*block) != b) + if (dm_block_location(*block) != b) { + /* + * dm_tm_shadow_block will have already decremented the old + * block, but it is still referenced by the btree. We + * increment to stop the insert decrementing it below zero + * when overwriting the old value. 
+ */ + dm_tm_inc(info->btree_info.tm, b); r = insert_ablock(info, index, *block, root); + } return r; } diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index a7e8bf296388..064a3c271baa 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm) } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); +void dm_bm_set_read_write(struct dm_block_manager *bm) +{ + bm->read_only = false; +} +EXPORT_SYMBOL_GPL(dm_bm_set_read_write); + u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) { return crc32c(~(u32) 0, data, len) ^ init_xor; diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 9a82083a66b6..13cd58e1fe69 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b); int dm_bm_flush_and_unlock(struct dm_block_manager *bm, struct dm_block *superblock); - /* - * Request data be prefetched into the cache. - */ +/* + * Request data is prefetched into the cache. + */ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); /* @@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b); * be returned if you do. */ void dm_bm_set_read_only(struct dm_block_manager *bm); +void dm_bm_set_read_write(struct dm_block_manager *bm); u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 6058569fe86c..466a60bbd716 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, } static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, - uint32_t (*mutator)(void *context, uint32_t old), + int (*mutator)(void *context, uint32_t old, uint32_t *new), void *context, enum allocation_event *ev) { int r; @@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, if (old > 2) { r = sm_ll_lookup_big_ref_count(ll, b, &old); - if (r < 0) + if (r < 0) { + dm_tm_unlock(ll->tm, nb); return r; + } } - ref_count = mutator(context, old); + r = mutator(context, old, &ref_count); + if (r) { + dm_tm_unlock(ll->tm, nb); + return r; + } if (ref_count <= 2) { sm_set_bitmap(bm_le, bit, ref_count); @@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b, return ll->save_ie(ll, index, &ie_disk); } -static uint32_t set_ref_count(void *context, uint32_t old) +static int set_ref_count(void *context, uint32_t old, uint32_t *new) { - return *((uint32_t *) context); + *new = *((uint32_t *) context); + return 0; } int sm_ll_insert(struct ll_disk *ll, dm_block_t b, @@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b, return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev); } -static uint32_t inc_ref_count(void *context, uint32_t old) +static int inc_ref_count(void *context, uint32_t old, uint32_t *new) { - return old + 1; + *new = old + 1; + return 0; } int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) @@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev); } -static uint32_t dec_ref_count(void *context, uint32_t old) +static int 
dec_ref_count(void *context, uint32_t old, uint32_t *new) { - return old - 1; + if (!old) { + DMERR_LIMIT("unable to decrement a reference count below 0"); + return -EINVAL; + } + + *new = old - 1; + return 0; } int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 1c959684caef..58fc1eef7499 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); int r = sm_metadata_new_block_(sm, b); - if (r) + if (r) { DMERR("unable to allocate new metadata block"); + return r; + } r = sm_metadata_get_nr_free(sm, &count); - if (r) + if (r) { DMERR("couldn't get free block count"); + return r; + } check_threshold(&smm->threshold, count); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index af6681b19776..1e5a540995e9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -66,7 +66,8 @@ */ static int max_queued_requests = 1024; -static void allow_barrier(struct r1conf *conf); +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, + sector_t bi_sector); static void lower_barrier(struct r1conf *conf); static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data) } #define RESYNC_BLOCK_SIZE (64*1024) -//#define RESYNC_BLOCK_SIZE PAGE_SIZE +#define RESYNC_DEPTH 32 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) -#define RESYNC_WINDOW (2048*1024) +#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) +#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) +#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS) static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) { @@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio) struct bio *bio = r1_bio->master_bio; int done; struct r1conf *conf = r1_bio->mddev->private; + sector_t start_next_window = r1_bio->start_next_window; + sector_t bi_sector = bio->bi_sector; if (bio->bi_phys_segments) { unsigned long flags; @@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio) bio->bi_phys_segments--; done = (bio->bi_phys_segments == 0); spin_unlock_irqrestore(&conf->device_lock, flags); + /* + * make_request() might be waiting for + * bi_phys_segments to decrease + */ + wake_up(&conf->wait_barrier); } else done = 1; @@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio) * Wake up any possible resync thread that waits for the device * to go idle. */ - allow_barrier(conf); + allow_barrier(conf, start_next_window, bi_sector); } } @@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf) * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. */ -#define RESYNC_DEPTH 32 - static void raise_barrier(struct r1conf *conf) { spin_lock_irq(&conf->resync_lock); @@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf) /* block any new IO from starting */ conf->barrier++; - /* Now wait for all pending IO to complete */ + /* For these conditions we must wait: + * A: while the array is in frozen state + * B: while barrier >= RESYNC_DEPTH, meaning resync reach + * the max count which allowed. 
+ * C: next_resync + RESYNC_SECTORS > start_next_window, meaning + * next resync will reach to the window which normal bios are + * handling. + */ wait_event_lock_irq(conf->wait_barrier, - !conf->nr_pending && conf->barrier < RESYNC_DEPTH, + !conf->array_frozen && + conf->barrier < RESYNC_DEPTH && + (conf->start_next_window >= + conf->next_resync + RESYNC_SECTORS), conf->resync_lock); spin_unlock_irq(&conf->resync_lock); @@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static void wait_barrier(struct r1conf *conf) +static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) { + bool wait = false; + + if (conf->array_frozen || !bio) + wait = true; + else if (conf->barrier && bio_data_dir(bio) == WRITE) { + if (conf->next_resync < RESYNC_WINDOW_SECTORS) + wait = true; + else if ((conf->next_resync - RESYNC_WINDOW_SECTORS + >= bio_end_sector(bio)) || + (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector)) + wait = false; + else + wait = true; + } + + return wait; +} + +static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) +{ + sector_t sector = 0; + spin_lock_irq(&conf->resync_lock); - if (conf->barrier) { + if (need_to_wait_for_sync(conf, bio)) { conf->nr_waiting++; /* Wait for the barrier to drop. * However if there are already pending @@ -860,22 +901,67 @@ static void wait_barrier(struct r1conf *conf) * count down. */ wait_event_lock_irq(conf->wait_barrier, - !conf->barrier || - (conf->nr_pending && + !conf->array_frozen && + (!conf->barrier || + ((conf->start_next_window < + conf->next_resync + RESYNC_SECTORS) && current->bio_list && - !bio_list_empty(current->bio_list)), + !bio_list_empty(current->bio_list))), conf->resync_lock); conf->nr_waiting--; } + + if (bio && bio_data_dir(bio) == WRITE) { + if (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector) { + if (conf->start_next_window == MaxSector) + conf->start_next_window = + conf->next_resync + + NEXT_NORMALIO_DISTANCE; + + if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) + <= bio->bi_sector) + conf->next_window_requests++; + else + conf->current_window_requests++; + } + if (bio->bi_sector >= conf->start_next_window) + sector = conf->start_next_window; + } + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); + return sector; } -static void allow_barrier(struct r1conf *conf) +static void allow_barrier(struct r1conf *conf, sector_t start_next_window, + sector_t bi_sector) { unsigned long flags; + spin_lock_irqsave(&conf->resync_lock, flags); conf->nr_pending--; + if (start_next_window) { + if (start_next_window == conf->start_next_window) { + if (conf->start_next_window + NEXT_NORMALIO_DISTANCE + <= bi_sector) + conf->next_window_requests--; + else + conf->current_window_requests--; + } else + conf->current_window_requests--; + + if (!conf->current_window_requests) { + if (conf->next_window_requests) { + conf->current_window_requests = + conf->next_window_requests; + conf->next_window_requests = 0; + conf->start_next_window += + NEXT_NORMALIO_DISTANCE; + } else + conf->start_next_window = MaxSector; + } + } spin_unlock_irqrestore(&conf->resync_lock, flags); wake_up(&conf->wait_barrier); } @@ -884,8 +970,7 @@ static void freeze_array(struct r1conf *conf, int extra) { /* stop syncio and normal IO and wait for everything to * go quite. 
- * We increment barrier and nr_waiting, and then - * wait until nr_pending match nr_queued+extra + * We wait until nr_pending match nr_queued+extra * This is called in the context of one normal IO request * that has failed. Thus any sync request that might be pending * will be blocked by nr_pending, and we need to wait for @@ -895,8 +980,7 @@ static void freeze_array(struct r1conf *conf, int extra) * we continue. */ spin_lock_irq(&conf->resync_lock); - conf->barrier++; - conf->nr_waiting++; + conf->array_frozen = 1; wait_event_lock_irq_cmd(conf->wait_barrier, conf->nr_pending == conf->nr_queued+extra, conf->resync_lock, @@ -907,8 +991,7 @@ static void unfreeze_array(struct r1conf *conf) { /* reverse the effect of the freeze */ spin_lock_irq(&conf->resync_lock); - conf->barrier--; - conf->nr_waiting--; + conf->array_frozen = 0; wake_up(&conf->wait_barrier); spin_unlock_irq(&conf->resync_lock); } @@ -1013,6 +1096,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) int first_clone; int sectors_handled; int max_sectors; + sector_t start_next_window; /* * Register the new request and wait if the reconstruction @@ -1042,7 +1126,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) finish_wait(&conf->wait_barrier, &w); } - wait_barrier(conf); + start_next_window = wait_barrier(conf, bio); bitmap = mddev->bitmap; @@ -1163,6 +1247,7 @@ read_again: disks = conf->raid_disks * 2; retry_write: + r1_bio->start_next_window = start_next_window; blocked_rdev = NULL; rcu_read_lock(); max_sectors = r1_bio->sectors; @@ -1231,14 +1316,24 @@ read_again: if (unlikely(blocked_rdev)) { /* Wait for this device to become unblocked */ int j; + sector_t old = start_next_window; for (j = 0; j < i; j++) if (r1_bio->bios[j]) rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; - allow_barrier(conf); + allow_barrier(conf, start_next_window, bio->bi_sector); md_wait_for_blocked_rdev(blocked_rdev, mddev); - wait_barrier(conf); + start_next_window = wait_barrier(conf, bio); + /* + * We must make sure the multi r1bios of bio have + * the same value of bi_phys_segments + */ + if (bio->bi_phys_segments && old && + old != start_next_window) + /* Wait for the former r1bio(s) to complete */ + wait_event(conf->wait_barrier, + bio->bi_phys_segments == 1); goto retry_write; } @@ -1438,11 +1533,14 @@ static void print_conf(struct r1conf *conf) static void close_sync(struct r1conf *conf) { - wait_barrier(conf); - allow_barrier(conf); + wait_barrier(conf, NULL); + allow_barrier(conf, 0, 0); mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; + + conf->next_resync = 0; + conf->start_next_window = MaxSector; } static int raid1_spare_active(struct mddev *mddev) @@ -2714,6 +2812,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->pending_count = 0; conf->recovery_disabled = mddev->recovery_disabled - 1; + conf->start_next_window = MaxSector; + conf->current_window_requests = conf->next_window_requests = 0; + err = -EIO; for (i = 0; i < conf->raid_disks * 2; i++) { @@ -2871,8 +2972,8 @@ static int stop(struct mddev *mddev) atomic_read(&bitmap->behind_writes) == 0); } - raise_barrier(conf); - lower_barrier(conf); + freeze_array(conf, 0); + unfreeze_array(conf); md_unregister_thread(&mddev->thread); if (conf->r1bio_pool) @@ -3031,10 +3132,10 @@ static void raid1_quiesce(struct mddev *mddev, int state) wake_up(&conf->wait_barrier); break; case 1: - raise_barrier(conf); + freeze_array(conf, 0); break; case 0: - lower_barrier(conf); + unfreeze_array(conf); break; } } @@ -3051,7 
+3152,8 @@ static void *raid1_takeover(struct mddev *mddev) mddev->new_chunk_sectors = 0; conf = setup_conf(mddev); if (!IS_ERR(conf)) - conf->barrier = 1; + /* Array must appear to be quiesced */ + conf->array_frozen = 1; return conf; } return ERR_PTR(-EINVAL); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 0ff3715fb7eb..9bebca7bff2f 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -41,6 +41,19 @@ struct r1conf { */ sector_t next_resync; + /* When raid1 starts resync, we divide array into four partitions + * |---------|--------------|---------------------|-------------| + * next_resync start_next_window end_window + * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE + * end_window = start_next_window + NEXT_NORMALIO_DISTANCE + * current_window_requests means the count of normalIO between + * start_next_window and end_window. + * next_window_requests means the count of normalIO after end_window. + * */ + sector_t start_next_window; + int current_window_requests; + int next_window_requests; + spinlock_t device_lock; /* list of 'struct r1bio' that need to be processed by raid1d, @@ -65,6 +78,7 @@ struct r1conf { int nr_waiting; int nr_queued; int barrier; + int array_frozen; /* Set to 1 if a full sync is needed, (fresh device added). * Cleared when a sync completes. @@ -111,6 +125,7 @@ struct r1bio { * in this BehindIO request */ sector_t sector; + sector_t start_next_window; int sectors; unsigned long state; struct mddev *mddev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7c3508abb5e1..c504e8389e69 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4384,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, mddev->flags == 0 || - kthread_should_stop()); + test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { + allow_barrier(conf); + return sectors_done; + } conf->reshape_safe = mddev->reshape_position; allow_barrier(conf); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7f0e17a27aeb..cc055da02e2a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) return &conf->stripe_hashtbl[hash]; } +static inline int stripe_hash_locks_hash(sector_t sect) +{ + return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; +} + +static inline void lock_device_hash_lock(struct r5conf *conf, int hash) +{ + spin_lock_irq(conf->hash_locks + hash); + spin_lock(&conf->device_lock); +} + +static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) +{ + spin_unlock(&conf->device_lock); + spin_unlock_irq(conf->hash_locks + hash); +} + +static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) +{ + int i; + local_irq_disable(); + spin_lock(conf->hash_locks); + for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) + spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); + spin_lock(&conf->device_lock); +} + +static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) +{ + int i; + spin_unlock(&conf->device_lock); + for (i = NR_STRIPE_HASH_LOCKS; i; i--) + spin_unlock(conf->hash_locks + i - 1); + local_irq_enable(); +} + /* bio's attached to a stripe+device for I/O are linked together in bi_sector * order without overlap. There may be several bio's per stripe+device, and * a bio could span several devices. 
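/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The raid5.c hunks split the single device_lock-protected inactive list
 * into NR_STRIPE_HASH_LOCKS per-hash lists, each guarded by its own lock,
 * with the bucket chosen from the stripe sector, plus a "lock everything
 * in a fixed order" helper for rare global operations. The standalone
 * sketch below uses hypothetical names and pthread mutexes instead of
 * spinlocks; it only shows the bucketed-lock shape and the fixed
 * acquisition order that avoids lock-order deadlocks.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_HASH_LOCKS   8u                  /* like NR_STRIPE_HASH_LOCKS */
#define HASH_LOCKS_MASK (NR_HASH_LOCKS - 1)

struct bucketed_counts {
	pthread_mutex_t locks[NR_HASH_LOCKS];   /* one lock per bucket */
	unsigned long counts[NR_HASH_LOCKS];    /* stands in for the per-hash inactive lists */
};

static unsigned int hash_of(uint64_t sector)
{
	/* mirrors stripe_hash_locks_hash(): upper sector bits select the bucket */
	return (unsigned int)(sector >> 4) & HASH_LOCKS_MASK;
}

static void add_to_bucket(struct bucketed_counts *b, uint64_t sector)
{
	unsigned int h = hash_of(sector);

	pthread_mutex_lock(&b->locks[h]);       /* common path: a single bucket lock */
	b->counts[h]++;
	pthread_mutex_unlock(&b->locks[h]);
}

/* Rare path: take every bucket lock, always in index order. */
static unsigned long total_locked(struct bucketed_counts *b)
{
	unsigned long sum = 0;
	unsigned int i;

	for (i = 0; i < NR_HASH_LOCKS; i++)
		pthread_mutex_lock(&b->locks[i]);
	for (i = 0; i < NR_HASH_LOCKS; i++)
		sum += b->counts[i];
	for (i = NR_HASH_LOCKS; i > 0; i--)
		pthread_mutex_unlock(&b->locks[i - 1]);
	return sum;
}

int main(void)
{
	struct bucketed_counts b;
	unsigned int i;
	uint64_t s;

	for (i = 0; i < NR_HASH_LOCKS; i++) {
		pthread_mutex_init(&b.locks[i], NULL);
		b.counts[i] = 0;
	}
	for (s = 0; s < 1024; s += 8)
		add_to_bucket(&b, s);
	printf("total entries: %lu\n", total_locked(&b));
	return 0;
}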
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh) } } -static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, + struct list_head *temp_inactive_list) { BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); @@ -278,23 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + if (!test_bit(STRIPE_EXPANDING, &sh->state)) + list_add_tail(&sh->lru, temp_inactive_list); } } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, + struct list_head *temp_inactive_list) { if (atomic_dec_and_test(&sh->count)) - do_release_stripe(conf, sh); + do_release_stripe(conf, sh, temp_inactive_list); +} + +/* + * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list + * + * Be careful: Only one task can add/delete stripes from temp_inactive_list at + * given time. Adding stripes only takes device lock, while deleting stripes + * only takes hash lock. + */ +static void release_inactive_stripe_list(struct r5conf *conf, + struct list_head *temp_inactive_list, + int hash) +{ + int size; + bool do_wakeup = false; + unsigned long flags; + + if (hash == NR_STRIPE_HASH_LOCKS) { + size = NR_STRIPE_HASH_LOCKS; + hash = NR_STRIPE_HASH_LOCKS - 1; + } else + size = 1; + while (size) { + struct list_head *list = &temp_inactive_list[size - 1]; + + /* + * We don't hold any lock here yet, get_active_stripe() might + * remove stripes from the list + */ + if (!list_empty_careful(list)) { + spin_lock_irqsave(conf->hash_locks + hash, flags); + if (list_empty(conf->inactive_list + hash) && + !list_empty(list)) + atomic_dec(&conf->empty_inactive_list_nr); + list_splice_tail_init(list, conf->inactive_list + hash); + do_wakeup = true; + spin_unlock_irqrestore(conf->hash_locks + hash, flags); + } + size--; + hash--; + } + + if (do_wakeup) { + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); + } } /* should hold conf->device_lock already */ -static int release_stripe_list(struct r5conf *conf) +static int release_stripe_list(struct r5conf *conf, + struct list_head *temp_inactive_list) { struct stripe_head *sh; int count = 0; @@ -303,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf) head = llist_del_all(&conf->released_stripes); head = llist_reverse_order(head); while (head) { + int hash; + sh = llist_entry(head, struct stripe_head, release_list); head = llist_next(head); /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ @@ -313,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf) * again, the count is always > 1. This is true for * STRIPE_ON_UNPLUG_LIST bit too. 
*/ - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &temp_inactive_list[hash]); count++; } @@ -324,9 +409,12 @@ static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; + struct list_head list; + int hash; bool wakeup; - if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) + if (unlikely(!conf->mddev->thread) || + test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) goto slow_path; wakeup = llist_add(&sh->release_list, &conf->released_stripes); if (wakeup) @@ -336,8 +424,11 @@ slow_path: local_irq_save(flags); /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { - do_release_stripe(conf, sh); + INIT_LIST_HEAD(&list); + hash = sh->hash_lock_index; + do_release_stripe(conf, sh, &list); spin_unlock(&conf->device_lock); + release_inactive_stripe_list(conf, &list, hash); } local_irq_restore(flags); } @@ -362,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) /* find an idle stripe, make sure it is unhashed, and return it. */ -static struct stripe_head *get_free_stripe(struct r5conf *conf) +static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh = NULL; struct list_head *first; - if (list_empty(&conf->inactive_list)) + if (list_empty(conf->inactive_list + hash)) goto out; - first = conf->inactive_list.next; + first = (conf->inactive_list + hash)->next; sh = list_entry(first, struct stripe_head, lru); list_del_init(first); remove_hash(sh); atomic_inc(&conf->active_stripes); + BUG_ON(hash != sh->hash_lock_index); + if (list_empty(conf->inactive_list + hash)) + atomic_inc(&conf->empty_inactive_list_nr); out: return sh; } @@ -416,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) { struct r5conf *conf = sh->raid_conf; - int i; + int i, seq; BUG_ON(atomic_read(&sh->count) != 0); BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); @@ -426,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) (unsigned long long)sh->sector); remove_hash(sh); - +retry: + seq = read_seqcount_begin(&conf->gen_lock); sh->generation = conf->generation - previous; sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; sh->sector = sector; @@ -448,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) dev->flags = 0; raid5_build_block(sh, i, previous); } + if (read_seqcount_retry(&conf->gen_lock, seq)) + goto retry; insert_hash(conf, sh); sh->cpu = smp_processor_id(); } @@ -552,57 +649,59 @@ get_active_stripe(struct r5conf *conf, sector_t sector, int previous, int noblock, int noquiesce) { struct stripe_head *sh; + int hash = stripe_hash_locks_hash(sector); pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); - spin_lock_irq(&conf->device_lock); + spin_lock_irq(conf->hash_locks + hash); do { wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0 || noquiesce, - conf->device_lock); + *(conf->hash_locks + hash)); sh = __find_stripe(conf, sector, conf->generation - previous); if (!sh) { if (!conf->inactive_blocked) - sh = get_free_stripe(conf); + sh = get_free_stripe(conf, hash); if (noblock && sh == NULL) break; if (!sh) { conf->inactive_blocked = 1; - wait_event_lock_irq(conf->wait_for_stripe, - !list_empty(&conf->inactive_list) && - (atomic_read(&conf->active_stripes) - < (conf->max_nr_stripes *3/4) - || !conf->inactive_blocked), - conf->device_lock); + wait_event_lock_irq( + conf->wait_for_stripe, + !list_empty(conf->inactive_list + hash) && + (atomic_read(&conf->active_stripes) + < (conf->max_nr_stripes * 3 / 4) + || !conf->inactive_blocked), + *(conf->hash_locks + hash)); conf->inactive_blocked = 0; } else init_stripe(sh, sector, previous); } else { + spin_lock(&conf->device_lock); if (atomic_read(&sh->count)) { BUG_ON(!list_empty(&sh->lru) && !test_bit(STRIPE_EXPANDING, &sh->state) && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) - && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); + ); } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); - if (list_empty(&sh->lru) && - !test_bit(STRIPE_EXPANDING, &sh->state)) - BUG(); + BUG_ON(list_empty(&sh->lru)); list_del_init(&sh->lru); if (sh->group) { sh->group->stripes_cnt--; sh->group = NULL; } } + spin_unlock(&conf->device_lock); } } while (sh == NULL); if (sh) atomic_inc(&sh->count); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(conf->hash_locks + hash); return sh; } @@ -758,7 +857,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_sector = (sh->sector + rdev->data_offset); if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) - bi->bi_rw |= REQ_FLUSH; + bi->bi_rw |= REQ_NOMERGE; bi->bi_vcnt = 1; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -1582,7 +1681,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) put_cpu(); } -static int grow_one_stripe(struct r5conf *conf) +static int grow_one_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh; sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); @@ -1598,6 +1697,7 @@ static int grow_one_stripe(struct r5conf *conf) kmem_cache_free(conf->slab_cache, sh); return 0; } + sh->hash_lock_index = hash; /* we just created an active stripe so... 
*/ atomic_set(&sh->count, 1); atomic_inc(&conf->active_stripes); @@ -1610,6 +1710,7 @@ static int grow_stripes(struct r5conf *conf, int num) { struct kmem_cache *sc; int devs = max(conf->raid_disks, conf->previous_raid_disks); + int hash; if (conf->mddev->gendisk) sprintf(conf->cache_name[0], @@ -1627,9 +1728,13 @@ static int grow_stripes(struct r5conf *conf, int num) return 1; conf->slab_cache = sc; conf->pool_size = devs; - while (num--) - if (!grow_one_stripe(conf)) + hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; + while (num--) { + if (!grow_one_stripe(conf, hash)) return 1; + conf->max_nr_stripes++; + hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; + } return 0; } @@ -1687,6 +1792,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) int err; struct kmem_cache *sc; int i; + int hash, cnt; if (newsize <= conf->pool_size) return 0; /* never bother to shrink */ @@ -1726,19 +1832,29 @@ static int resize_stripes(struct r5conf *conf, int newsize) * OK, we have enough stripes, start collecting inactive * stripes and copying them over */ + hash = 0; + cnt = 0; list_for_each_entry(nsh, &newstripes, lru) { - spin_lock_irq(&conf->device_lock); - wait_event_lock_irq(conf->wait_for_stripe, - !list_empty(&conf->inactive_list), - conf->device_lock); - osh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); + lock_device_hash_lock(conf, hash); + wait_event_cmd(conf->wait_for_stripe, + !list_empty(conf->inactive_list + hash), + unlock_device_hash_lock(conf, hash), + lock_device_hash_lock(conf, hash)); + osh = get_free_stripe(conf, hash); + unlock_device_hash_lock(conf, hash); atomic_set(&nsh->count, 1); for(i=0; i<conf->pool_size; i++) nsh->dev[i].page = osh->dev[i].page; for( ; i<newsize; i++) nsh->dev[i].page = NULL; + nsh->hash_lock_index = hash; kmem_cache_free(conf->slab_cache, osh); + cnt++; + if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + + !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { + hash++; + cnt = 0; + } } kmem_cache_destroy(conf->slab_cache); @@ -1797,13 +1913,13 @@ static int resize_stripes(struct r5conf *conf, int newsize) return err; } -static int drop_one_stripe(struct r5conf *conf) +static int drop_one_stripe(struct r5conf *conf, int hash) { struct stripe_head *sh; - spin_lock_irq(&conf->device_lock); - sh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); + spin_lock_irq(conf->hash_locks + hash); + sh = get_free_stripe(conf, hash); + spin_unlock_irq(conf->hash_locks + hash); if (!sh) return 0; BUG_ON(atomic_read(&sh->count)); @@ -1815,8 +1931,10 @@ static int drop_one_stripe(struct r5conf *conf) static void shrink_stripes(struct r5conf *conf) { - while (drop_one_stripe(conf)) - ; + int hash; + for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++) + while (drop_one_stripe(conf, hash)) + ; if (conf->slab_cache) kmem_cache_destroy(conf->slab_cache); @@ -1921,6 +2039,9 @@ static void raid5_end_read_request(struct bio * bi, int error) mdname(conf->mddev), bdn); else retry = 1; + if (set_bad && test_bit(In_sync, &rdev->flags) + && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + retry = 1; if (retry) if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { set_bit(R5_ReadError, &sh->dev[i].flags); @@ -3900,7 +4021,8 @@ static void raid5_activate_delayed(struct r5conf *conf) } } -static void activate_bit_delay(struct r5conf *conf) +static void activate_bit_delay(struct r5conf *conf, + struct list_head *temp_inactive_list) { /* device_lock is held */ struct list_head head; @@ -3908,9 +4030,11 @@ static void activate_bit_delay(struct r5conf 
*conf) list_del_init(&conf->bitmap_list); while (!list_empty(&head)) { struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); + int hash; list_del_init(&sh->lru); atomic_inc(&sh->count); - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &temp_inactive_list[hash]); } } @@ -3926,7 +4050,7 @@ int md_raid5_congested(struct mddev *mddev, int bits) return 1; if (conf->quiesce) return 1; - if (list_empty_careful(&conf->inactive_list)) + if (atomic_read(&conf->empty_inactive_list_nr)) return 1; return 0; @@ -4256,6 +4380,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) struct raid5_plug_cb { struct blk_plug_cb cb; struct list_head list; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; }; static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) @@ -4266,6 +4391,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) struct mddev *mddev = cb->cb.data; struct r5conf *conf = mddev->private; int cnt = 0; + int hash; if (cb->list.next && !list_empty(&cb->list)) { spin_lock_irq(&conf->device_lock); @@ -4283,11 +4409,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) * STRIPE_ON_RELEASE_LIST could be set here. In that * case, the count is always > 1 here */ - __release_stripe(conf, sh); + hash = sh->hash_lock_index; + __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); cnt++; } spin_unlock_irq(&conf->device_lock); } + release_inactive_stripe_list(conf, cb->temp_inactive_list, + NR_STRIPE_HASH_LOCKS); if (mddev->queue) trace_block_unplug(mddev->queue, cnt, !from_schedule); kfree(cb); @@ -4308,8 +4437,12 @@ static void release_stripe_plug(struct mddev *mddev, cb = container_of(blk_cb, struct raid5_plug_cb, cb); - if (cb->list.next == NULL) + if (cb->list.next == NULL) { + int i; INIT_LIST_HEAD(&cb->list); + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(cb->temp_inactive_list + i); + } if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) list_add_tail(&sh->lru, &cb->list); @@ -4692,14 +4825,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { /* Cannot proceed until we've updated the superblock... */ wait_event(conf->wait_for_overlap, - atomic_read(&conf->reshape_stripes)==0); + atomic_read(&conf->reshape_stripes)==0 + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (atomic_read(&conf->reshape_stripes) != 0) + return 0; mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, mddev->flags == 0 || - kthread_should_stop()); + test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + return 0; spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); @@ -4782,7 +4920,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk >= mddev->resync_max - mddev->curr_resync_completed) { /* Cannot proceed until we've updated the superblock... 
*/ wait_event(conf->wait_for_overlap, - atomic_read(&conf->reshape_stripes) == 0); + atomic_read(&conf->reshape_stripes) == 0 + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (atomic_read(&conf->reshape_stripes) != 0) + goto ret; mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; @@ -4790,13 +4931,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_DEVS, &mddev->flags) - || kthread_should_stop()); + || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); + if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) + goto ret; spin_lock_irq(&conf->device_lock); conf->reshape_safe = mddev->reshape_position; spin_unlock_irq(&conf->device_lock); wake_up(&conf->wait_for_overlap); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } +ret: return reshape_sectors; } @@ -4954,27 +5098,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) } static int handle_active_stripes(struct r5conf *conf, int group, - struct r5worker *worker) + struct r5worker *worker, + struct list_head *temp_inactive_list) { struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; - int i, batch_size = 0; + int i, batch_size = 0, hash; + bool release_inactive = false; while (batch_size < MAX_STRIPE_BATCH && (sh = __get_priority_stripe(conf, group)) != NULL) batch[batch_size++] = sh; - if (batch_size == 0) - return batch_size; + if (batch_size == 0) { + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + if (!list_empty(temp_inactive_list + i)) + break; + if (i == NR_STRIPE_HASH_LOCKS) + return batch_size; + release_inactive = true; + } spin_unlock_irq(&conf->device_lock); + release_inactive_stripe_list(conf, temp_inactive_list, + NR_STRIPE_HASH_LOCKS); + + if (release_inactive) { + spin_lock_irq(&conf->device_lock); + return 0; + } + for (i = 0; i < batch_size; i++) handle_stripe(batch[i]); cond_resched(); spin_lock_irq(&conf->device_lock); - for (i = 0; i < batch_size; i++) - __release_stripe(conf, batch[i]); + for (i = 0; i < batch_size; i++) { + hash = batch[i]->hash_lock_index; + __release_stripe(conf, batch[i], &temp_inactive_list[hash]); + } return batch_size; } @@ -4995,9 +5157,10 @@ static void raid5_do_work(struct work_struct *work) while (1) { int batch_size, released; - released = release_stripe_list(conf); + released = release_stripe_list(conf, worker->temp_inactive_list); - batch_size = handle_active_stripes(conf, group_id, worker); + batch_size = handle_active_stripes(conf, group_id, worker, + worker->temp_inactive_list); worker->working = false; if (!batch_size && !released) break; @@ -5036,7 +5199,7 @@ static void raid5d(struct md_thread *thread) struct bio *bio; int batch_size, released; - released = release_stripe_list(conf); + released = release_stripe_list(conf, conf->temp_inactive_list); if ( !list_empty(&conf->bitmap_list)) { @@ -5046,7 +5209,7 @@ static void raid5d(struct md_thread *thread) bitmap_unplug(mddev->bitmap); spin_lock_irq(&conf->device_lock); conf->seq_write = conf->seq_flush; - activate_bit_delay(conf); + activate_bit_delay(conf, conf->temp_inactive_list); } raid5_activate_delayed(conf); @@ -5060,7 +5223,8 @@ static void raid5d(struct md_thread *thread) handled++; } - batch_size = handle_active_stripes(conf, ANY_GROUP, NULL); + batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, + conf->temp_inactive_list); if (!batch_size && !released) break; handled += batch_size; @@ -5096,22 +5260,29 @@ 
raid5_set_cache_size(struct mddev *mddev, int size) { struct r5conf *conf = mddev->private; int err; + int hash; if (size <= 16 || size > 32768) return -EINVAL; + hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; while (size < conf->max_nr_stripes) { - if (drop_one_stripe(conf)) + if (drop_one_stripe(conf, hash)) conf->max_nr_stripes--; else break; + hash--; + if (hash < 0) + hash = NR_STRIPE_HASH_LOCKS - 1; } err = md_allow_write(mddev); if (err) return err; + hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; while (size > conf->max_nr_stripes) { - if (grow_one_stripe(conf)) + if (grow_one_stripe(conf, hash)) conf->max_nr_stripes++; else break; + hash = (hash + 1) % NR_STRIPE_HASH_LOCKS; } return 0; } @@ -5199,15 +5370,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) return 0; } -static int alloc_thread_groups(struct r5conf *conf, int cnt); +static int alloc_thread_groups(struct r5conf *conf, int cnt, + int *group_cnt, + int *worker_cnt_per_group, + struct r5worker_group **worker_groups); static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf = mddev->private; unsigned long new; int err; - struct r5worker_group *old_groups; - int old_group_cnt; + struct r5worker_group *new_groups, *old_groups; + int group_cnt, worker_cnt_per_group; if (len >= PAGE_SIZE) return -EINVAL; @@ -5223,14 +5397,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) mddev_suspend(mddev); old_groups = conf->worker_groups; - old_group_cnt = conf->worker_cnt_per_group; + if (old_groups) + flush_workqueue(raid5_wq); + + err = alloc_thread_groups(conf, new, + &group_cnt, &worker_cnt_per_group, + &new_groups); + if (!err) { + spin_lock_irq(&conf->device_lock); + conf->group_cnt = group_cnt; + conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_groups = new_groups; + spin_unlock_irq(&conf->device_lock); - conf->worker_groups = NULL; - err = alloc_thread_groups(conf, new); - if (err) { - conf->worker_groups = old_groups; - conf->worker_cnt_per_group = old_group_cnt; - } else { if (old_groups) kfree(old_groups[0].workers); kfree(old_groups); @@ -5260,40 +5439,47 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; -static int alloc_thread_groups(struct r5conf *conf, int cnt) +static int alloc_thread_groups(struct r5conf *conf, int cnt, + int *group_cnt, + int *worker_cnt_per_group, + struct r5worker_group **worker_groups) { - int i, j; + int i, j, k; ssize_t size; struct r5worker *workers; - conf->worker_cnt_per_group = cnt; + *worker_cnt_per_group = cnt; if (cnt == 0) { - conf->worker_groups = NULL; + *group_cnt = 0; + *worker_groups = NULL; return 0; } - conf->group_cnt = num_possible_nodes(); + *group_cnt = num_possible_nodes(); size = sizeof(struct r5worker) * cnt; - workers = kzalloc(size * conf->group_cnt, GFP_NOIO); - conf->worker_groups = kzalloc(sizeof(struct r5worker_group) * - conf->group_cnt, GFP_NOIO); - if (!conf->worker_groups || !workers) { + workers = kzalloc(size * *group_cnt, GFP_NOIO); + *worker_groups = kzalloc(sizeof(struct r5worker_group) * + *group_cnt, GFP_NOIO); + if (!*worker_groups || !workers) { kfree(workers); - kfree(conf->worker_groups); - conf->worker_groups = NULL; + kfree(*worker_groups); return -ENOMEM; } - for (i = 0; i < conf->group_cnt; i++) { + for (i = 0; i < *group_cnt; i++) { struct r5worker_group *group; - group = &conf->worker_groups[i]; + group = &(*worker_groups)[i]; INIT_LIST_HEAD(&group->handle_list); group->conf = 
conf; group->workers = workers + i * cnt; for (j = 0; j < cnt; j++) { - group->workers[j].group = group; - INIT_WORK(&group->workers[j].work, raid5_do_work); + struct r5worker *worker = group->workers + j; + worker->group = group; + INIT_WORK(&worker->work, raid5_do_work); + + for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) + INIT_LIST_HEAD(worker->temp_inactive_list + k); } } @@ -5444,6 +5630,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) struct md_rdev *rdev; struct disk_info *disk; char pers_name[6]; + int i; + int group_cnt, worker_cnt_per_group; + struct r5worker_group *new_group; if (mddev->new_level != 5 && mddev->new_level != 4 @@ -5478,7 +5667,12 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (conf == NULL) goto abort; /* Don't enable multi-threading by default*/ - if (alloc_thread_groups(conf, 0)) + if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, + &new_group)) { + conf->group_cnt = group_cnt; + conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_groups = new_group; + } else goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); @@ -5488,7 +5682,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) INIT_LIST_HEAD(&conf->hold_list); INIT_LIST_HEAD(&conf->delayed_list); INIT_LIST_HEAD(&conf->bitmap_list); - INIT_LIST_HEAD(&conf->inactive_list); init_llist_head(&conf->released_stripes); atomic_set(&conf->active_stripes, 0); atomic_set(&conf->preread_active_stripes, 0); @@ -5514,6 +5707,21 @@ static struct r5conf *setup_conf(struct mddev *mddev) if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; + /* We init hash_locks[0] separately to that it can be used + * as the reference lock in the spin_lock_nest_lock() call + * in lock_all_device_hash_locks_irq in order to convince + * lockdep that we know what we are doing. 
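The setup_conf() comment above refers to helpers (lock_all_device_hash_locks_irq(), unlock_all_device_hash_locks_irq(), stripe_hash_locks_hash()) whose bodies are not visible in the hunks quoted here. A minimal sketch of what they plausibly look like, assuming hash_locks[0] is the spin_lock_nest_lock() reference lock exactly as the comment describes, and that STRIPE_SHIFT is the existing stripe-size shift from raid5.h:

static inline int stripe_hash_locks_hash(sector_t sect)
{
	/* map a stripe's sector to one of the NR_STRIPE_HASH_LOCKS buckets */
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	local_irq_disable();
	/* hash_locks[0] acts as the lockdep reference lock */
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
		spin_unlock(conf->hash_locks + i);
	spin_unlock(conf->hash_locks);
	local_irq_enable();
}

With device_lock taken last and released first, each hash lock protects only its own inactive_list and hash chain, which is why drop_one_stripe() and resize_stripes() in the hunks above now take conf->hash_locks + hash instead of the global device_lock.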
+ */ + spin_lock_init(conf->hash_locks); + for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) + spin_lock_init(conf->hash_locks + i); + + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(conf->inactive_list + i); + + for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) + INIT_LIST_HEAD(conf->temp_inactive_list + i); + conf->level = mddev->new_level; if (raid5_alloc_percpu(conf) != 0) goto abort; @@ -5554,7 +5762,6 @@ static struct r5conf *setup_conf(struct mddev *mddev) else conf->max_degraded = 1; conf->algorithm = mddev->new_layout; - conf->max_nr_stripes = NR_STRIPES; conf->reshape_progress = mddev->reshape_position; if (conf->reshape_progress != MaxSector) { conf->prev_chunk_sectors = mddev->chunk_sectors; @@ -5563,7 +5770,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; - if (grow_stripes(conf, conf->max_nr_stripes)) { + atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); + if (grow_stripes(conf, NR_STRIPES)) { printk(KERN_ERR "md/raid:%s: couldn't allocate %dkB for buffers\n", mdname(mddev), memory); @@ -6369,12 +6577,18 @@ static int raid5_start_reshape(struct mddev *mddev) if (!mddev->sync_thread) { mddev->recovery = 0; spin_lock_irq(&conf->device_lock); + write_seqcount_begin(&conf->gen_lock); mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; + mddev->new_chunk_sectors = + conf->chunk_sectors = conf->prev_chunk_sectors; + mddev->new_layout = conf->algorithm = conf->prev_algo; rdev_for_each(rdev, mddev) rdev->new_data_offset = rdev->data_offset; smp_wmb(); + conf->generation --; conf->reshape_progress = MaxSector; mddev->reshape_position = MaxSector; + write_seqcount_end(&conf->gen_lock); spin_unlock_irq(&conf->device_lock); return -EAGAIN; } @@ -6462,27 +6676,28 @@ static void raid5_quiesce(struct mddev *mddev, int state) break; case 1: /* stop all writes */ - spin_lock_irq(&conf->device_lock); + lock_all_device_hash_locks_irq(conf); /* '2' tells resync/reshape to pause so that all * active stripes can drain */ conf->quiesce = 2; - wait_event_lock_irq(conf->wait_for_stripe, + wait_event_cmd(conf->wait_for_stripe, atomic_read(&conf->active_stripes) == 0 && atomic_read(&conf->active_aligned_reads) == 0, - conf->device_lock); + unlock_all_device_hash_locks_irq(conf), + lock_all_device_hash_locks_irq(conf)); conf->quiesce = 1; - spin_unlock_irq(&conf->device_lock); + unlock_all_device_hash_locks_irq(conf); /* allow reshape to continue */ wake_up(&conf->wait_for_overlap); break; case 0: /* re-enable writes */ - spin_lock_irq(&conf->device_lock); + lock_all_device_hash_locks_irq(conf); conf->quiesce = 0; wake_up(&conf->wait_for_stripe); wake_up(&conf->wait_for_overlap); - spin_unlock_irq(&conf->device_lock); + unlock_all_device_hash_locks_irq(conf); break; } } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index b42e6b462eda..01ad8ae8f578 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -205,6 +205,7 @@ struct stripe_head { short pd_idx; /* parity disk index */ short qd_idx; /* 'Q' disk index for raid6 */ short ddf_layout;/* use DDF ordering to calculate Q */ + short hash_lock_index; unsigned long state; /* state flags */ atomic_t count; /* nr of active thread/requests */ int bm_seq; /* sequence number for bitmap flushes */ @@ -367,9 +368,18 @@ struct disk_info { struct md_rdev *rdev, *replacement; }; +/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. 
+ * This is because we sometimes take all the spinlocks + * and creating that much locking depth can cause + * problems. + */ +#define NR_STRIPE_HASH_LOCKS 8 +#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1) + struct r5worker { struct work_struct work; struct r5worker_group *group; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; bool working; }; @@ -382,6 +392,8 @@ struct r5worker_group { struct r5conf { struct hlist_head *stripe_hashtbl; + /* only protect corresponding hash list and inactive_list */ + spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS]; struct mddev *mddev; int chunk_sectors; int level, algorithm; @@ -462,7 +474,8 @@ struct r5conf { * Free stripes pool */ atomic_t active_stripes; - struct list_head inactive_list; + struct list_head inactive_list[NR_STRIPE_HASH_LOCKS]; + atomic_t empty_inactive_list_nr; struct llist_head released_stripes; wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_overlap; @@ -477,6 +490,7 @@ struct r5conf { * the new thread here until we fully activate the array. */ struct md_thread *thread; + struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; struct r5worker_group *worker_groups; int group_cnt; int worker_cnt_per_group; diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h index d0799e323364..9c9063cd3208 100644 --- a/drivers/media/common/siano/smscoreapi.h +++ b/drivers/media/common/siano/smscoreapi.h @@ -955,7 +955,7 @@ struct sms_rx_stats { u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ s32 SNR; /* dB */ u32 ber; /* Post Viterbi ber [1E-5] */ - u32 ber_error_count; /* Number of erronous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ u32 ber_bit_count; /* Total number of SYNC bits. */ u32 ts_per; /* Transport stream PER, 0xFFFFFFFF indicate N/A */ @@ -981,7 +981,7 @@ struct sms_rx_stats_ex { u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ s32 SNR; /* dB */ u32 ber; /* Post Viterbi ber [1E-5] */ - u32 ber_error_count; /* Number of erronous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ u32 ber_bit_count; /* Total number of SYNC bits. */ u32 ts_per; /* Transport stream PER, 0xFFFFFFFF indicate N/A */ diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h index 92c413ba0c79..ae36d0ae0fb1 100644 --- a/drivers/media/common/siano/smsdvb.h +++ b/drivers/media/common/siano/smsdvb.h @@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S { u32 is_demod_locked; /* 0 - not locked, 1 - locked */ u32 ber_bit_count; /* Total number of SYNC bits. */ - u32 ber_error_count; /* Number of erronous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ s32 MRC_SNR; /* dB */ s32 mrc_in_band_pwr; /* In band power in dBM */ diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 58de4410c525..6c7ff0cdcd32 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) dprintk_tscheck("TEI detected. 
" "PID=0x%x data1=0x%x\n", pid, buf[1]); - /* data in this packet cant be trusted - drop it unless + /* data in this packet can't be trusted - drop it unless * module option dvb_demux_feed_err_pkts is set */ if (!dvb_demux_feed_err_pkts) return; @@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed, return -EINVAL; } - if (feed->is_filtering) + if (feed->is_filtering) { + /* release dvbdmx->mutex as far as it is + acquired by stop_filtering() itself */ + mutex_unlock(&dvbdmx->mutex); feed->stop_filtering(feed); + mutex_lock(&dvbdmx->mutex); + } spin_lock_irq(&dvbdmx->lock); f = dvbdmxfeed->filter; diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index 30ee59052157..65728c25ea05 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val, static int af9033_wr_reg_val_tab(struct af9033_state *state, const struct reg_val *tab, int tab_len) { +#define MAX_TAB_LEN 212 int ret, i, j; - u8 buf[MAX_XFER_SIZE]; + u8 buf[1 + MAX_TAB_LEN]; + + dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); if (tab_len > sizeof(buf)) { - dev_warn(&state->i2c->dev, - "%s: i2c wr len=%d is too big!\n", - KBUILD_MODNAME, tab_len); + dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n", + KBUILD_MODNAME, tab_len); return -EINVAL; } - dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); - for (i = 0, j = 0; i < tab_len; i++) { buf[j] = tab[i].val; diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c index 125a44041011..5c6ab4921bf1 100644 --- a/drivers/media/dvb-frontends/cxd2820r_c.c +++ b/drivers/media/dvb-frontends/cxd2820r_c.c @@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe) num = if_freq / 1000; /* Hz => kHz */ num *= 0x4000; - if_ctl = cxd2820r_div_u64_round_closest(num, 41000); + if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000); buf[0] = (if_ctl >> 8) & 0x3f; buf[1] = (if_ctl >> 0) & 0xff; diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 90536147bf04..6dbbee453ee1 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe) dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? 
*/ - /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */ + /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */ *timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON); *tune_state = CT_DEMOD_STEP_5; break; @@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe) case CT_DEMOD_STEP_9: /* 39 */ if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */ - /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */ + /* defines timeout for mpeg lock depending on interleaver length of longest layer */ for (i = 0; i < 3; i++) { if (c->layer[i].interleaving >= deeper_interleaver) { dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving); diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index d416c15691da..bf29a3f0e6f0 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable) goto error; if (state->m_enable_parallel == true) { - /* paralel -> enable MD1 to MD7 */ + /* parallel -> enable MD1 to MD7 */ status = write16(state, SIO_PDR_MD1_CFG__A, sio_pdr_mdx_cfg); if (status < 0) @@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state) dprintk(1, "\n"); - /* Gracefull shutdown (byte boundaries) */ + /* Graceful shutdown (byte boundaries) */ status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode); if (status < 0) goto error; @@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state, fec_oc_dto_burst_len = 204; } - /* Check serial or parrallel output */ + /* Check serial or parallel output */ fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M)); if (state->m_enable_parallel == false) { /* MPEG data output is serial -> set ipr_mode[0] */ @@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state) goto error; if (count == 1) { - /* Try sampling on a diffrent edge */ + /* Try sampling on a different edge */ u16 clk_neg = 0; status = read16(state, IQM_AF_CLKNEG__A, &clk_neg); @@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state, if (status < 0) goto error; - /* Retreive results parameters from SC */ + /* Retrieve results parameters from SC */ switch (cmd) { /* All commands yielding 5 results */ /* All commands yielding 4 results */ @@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, break; } #if 0 - /* No hierachical channels support in BDA */ + /* No hierarchical channels support in BDA */ /* Priority (only for hierarchical channels) */ switch (channel->priority) { case DRX_PRIORITY_LOW: @@ -4081,7 +4081,7 @@ error: /*============================================================================*/ /** -* \brief Retreive lock status . +* \brief Retrieve lock status . * \param demod Pointer to demodulator instance. * \param lockStat Pointer to lock status structure. * \return DRXStatus_t. @@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state) goto error; /* Stamp driver version number in SCU data RAM in BCD code - Done to enable field application engineers to retreive drxdriver version + Done to enable field application engineers to retrieve drxdriver version via I2C from SCU RAM. Not using SCU command interface for SCU register access since no microcode may be present. 
@@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe) fe->ops.tuner_ops.get_if_frequency(fe, &IF); start(state, 0, IF); - /* After set_frontend, stats aren't avaliable */ + /* After set_frontend, stats aren't available */ p->strength.stat[0].scale = FE_SCALE_RELATIVE; p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 7efb796c472c..50e8b63e5169 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg, sizeof(priv->tuner_i2c_adapter.name)); priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo; priv->tuner_i2c_adapter.algo_data = NULL; + priv->tuner_i2c_adapter.dev.parent = &i2c->dev; i2c_set_adapdata(&priv->tuner_i2c_adapter, priv); if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) { dev_err(&i2c->dev, diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h index 4a5b7d211d2f..b253d400e817 100644 --- a/drivers/media/i2c/adv7183_regs.h +++ b/drivers/media/i2c/adv7183_regs.h @@ -52,9 +52,9 @@ #define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */ #define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */ #define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */ -#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */ -#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */ -#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */ +#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */ +#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */ +#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */ #define ADV7183_POLARITY 0x37 /* Polarity */ #define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */ #define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */ diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index fbfdd2fc2a36..a324106b9f11 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd, break; case ADV7604_MODE_HDMI: /* set default prim_mode/vid_std for HDMI - accoring to [REF_03, c. 4.2] */ + according to [REF_03, c. 4.2] */ io_write(sd, 0x00, 0x02); /* video std */ io_write(sd, 0x01, 0x06); /* prim mode */ break; diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 22f729d66a96..b154f36740b4 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd, break; case ADV7842_MODE_HDMI: /* set default prim_mode/vid_std for HDMI - accoring to [REF_03, c. 4.2] */ + according to [REF_03, c. 
4.2] */ io_write(sd, 0x00, 0x02); /* video std */ io_write(sd, 0x01, 0x06); /* prim mode */ break; diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index 82bf5679da30..99ee456700f4 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) if (!rc) { /* - * If platform_data doesn't specify rc_dev, initilize it + * If platform_data doesn't specify rc_dev, initialize it * internally */ rc = rc_allocate_device(); diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c index f34429e452ab..a60931e66312 100644 --- a/drivers/media/i2c/m5mols/m5mols_controls.c +++ b/drivers/media/i2c/m5mols/m5mols_controls.c @@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd) u16 zoom_step; int ret; - /* Determine the firmware dependant control range and step values */ + /* Determine the firmware dependent control range and step values */ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max); if (ret < 0) return ret; diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c index 4734836fe5a4..1c2303d18bf4 100644 --- a/drivers/media/i2c/mt9p031.c +++ b/drivers/media/i2c/mt9p031.c @@ -19,6 +19,7 @@ #include <linux/i2c.h> #include <linux/log2.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pm.h> #include <linux/regulator/consumer.h> diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 6fec9384d86e..e7f555cc827a 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd) mutex_unlock(&state->lock); v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n", - __func__, ret ? "failed" : "succeded", ret); + __func__, ret ? "failed" : "succeeded", ret); return ret; } diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h index 9d2c08652246..9dfa516f6944 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3.h +++ b/drivers/media/i2c/s5c73m3/s5c73m3.h @@ -393,7 +393,7 @@ struct s5c73m3 { /* External master clock frequency */ u32 mclk_frequency; - /* Video bus type - MIPI-CSI2/paralell */ + /* Video bus type - MIPI-CSI2/parallel */ enum v4l2_mbus_type bus_type; const struct s5c73m3_frame_size *sensor_pix_size[2]; diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index 637d02634527..afdbcb045cee 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state, * the analog demod. * If the tuner is not found, it returns -ENODEV. * If auto-detection is disabled and the tuner doesn't match what it was - * requred, it returns -EINVAL and fills 'name'. + * required, it returns -EINVAL and fills 'name'. * If the chip is found, it returns the chip ID and fills 'name'. 
*/ static int saa711x_detect_chip(struct i2c_client *client, diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c index 0a5c5d4fedd6..d2daa6a8f272 100644 --- a/drivers/media/i2c/soc_camera/ov5642.c +++ b/drivers/media/i2c/soc_camera/ov5642.c @@ -642,7 +642,7 @@ static const struct ov5642_datafmt static int reg_read(struct i2c_client *client, u16 reg, u8 *val) { int ret; - /* We have 16-bit i2c addresses - care for endianess */ + /* We have 16-bit i2c addresses - care for endianness */ unsigned char data[2] = { reg >> 8, reg & 0xff }; ret = i2c_master_send(client, data, 2); diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c index 42276d93624c..ed9ae8875348 100644 --- a/drivers/media/i2c/ths7303.c +++ b/drivers/media/i2c/ths7303.c @@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val) } /* following function is used to set ths7303 */ -int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode) +static int ths7303_setval(struct v4l2_subdev *sd, + enum ths7303_filter_mode mode) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ths7303_state *state = to_state(sd); diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c index 3f584a7d0781..bee7946faa7c 100644 --- a/drivers/media/i2c/wm8775.c +++ b/drivers/media/i2c/wm8775.c @@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd, return -EINVAL; } state->input = input; - if (!v4l2_ctrl_g_ctrl(state->mute)) + if (v4l2_ctrl_g_ctrl(state->mute)) return 0; if (!v4l2_ctrl_g_ctrl(state->vol)) return 0; - if (!v4l2_ctrl_g_ctrl(state->bal)) - return 0; wm8775_set_audio(sd, 1); return 0; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index a3b1ee9c00d7..92a06fd85865 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) } btv->std = V4L2_STD_PAL; init_irqreg(btv); - v4l2_ctrl_handler_setup(hdl); + if (!bttv_tvcards[btv->c.type].no_video) + v4l2_ctrl_handler_setup(hdl); if (hdl->error) { result = hdl->error; goto fail2; diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h index 2767c64df0c8..57f4688ea55b 100644 --- a/drivers/media/pci/cx18/cx18-driver.h +++ b/drivers/media/pci/cx18/cx18-driver.h @@ -262,7 +262,7 @@ struct cx18_options { }; /* per-mdl bit flags */ -#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */ +#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */ /* per-stream, s_flags */ #define CX18_F_S_CLAIMED 3 /* this stream is claimed */ diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index e3fc2c71808a..95666eee7b27 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value) cx_write(MC417_RWD, regval); /* Transition RD to effect read transaction across bus. - * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? + * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)? * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its * input only...) 
*/ diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c index 8164d74b46a4..655d6854a8d7 100644 --- a/drivers/media/pci/pluto2/pluto2.c +++ b/drivers/media/pci/pluto2/pluto2.c @@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto) /* set automatic LED control by FPGA */ pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED); - /* set data endianess */ + /* set data endianness */ #ifdef __LITTLE_ENDIAN pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END); #else diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 57ef5456f1e8..1bf06970ca3e 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev, if (fw_debug) { dev->kthread = kthread_run(saa7164_thread_function, dev, "saa7164 debug"); - if (!dev->kthread) + if (IS_ERR(dev->kthread)) { + dev->kthread = NULL; printk(KERN_ERR "%s() Failed to create " "debug kernel thread\n", __func__); + } } } /* != BOARD_UNKNOWN */ diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index bd72fb97fea5..61f3dbcc259f 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb) if (q_data->fourcc == V4L2_PIX_FMT_H264 && vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { /* - * For backwards compatiblity, queuing an empty buffer marks + * For backwards compatibility, queuing an empty buffer marks * the stream end */ if (vb2_get_plane_payload(vb, 0) == 0) diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index 3d66d88ea3a1..f7915695c907 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev) dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); - /* Enable clocks and perform basic initalization */ + /* Enable clocks and perform basic initialization */ clk_enable(fimc->clock[CLK_GATE]); fimc_hw_reset(fimc); diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 7a4ee4c0449d..c1bce170df6f 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd, goto dev_unlock; drvdata = dev_get_drvdata(dev); - /* Some subdev didn't probe succesfully id drvdata is NULL */ + /* Some subdev didn't probe successfully id drvdata is NULL */ if (drvdata) { switch (plat_entity) { case IDX_FIMC: diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 36513e896413..65cab70fefcb 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, ctx->xt->dir = DMA_MEM_TO_MEM; ctx->xt->src_sgl = false; ctx->xt->dst_sgl = true; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); if (tx == NULL) { diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index 3458fa0e2fd5..054507f16734 100644 --- 
a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam) struct mmp_camera *cam = mcam_to_cam(mcam); struct mmp_camera_platform_data *pdata; - if (mcam->bus_type == V4L2_MBUS_CSI2) { - cam->mipi_clk = devm_clk_get(mcam->dev, "mipi"); - if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)) - return PTR_ERR(cam->mipi_clk); - } - /* * Turn on power and clocks to the controller. */ @@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam) gpio_set_value(pdata->sensor_power_gpio, 0); gpio_set_value(pdata->sensor_reset_gpio, 0); - if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) { - if (cam->mipi_clk) - devm_clk_put(mcam->dev, cam->mipi_clk); - cam->mipi_clk = NULL; - } - mcam_clk_disable(mcam); } @@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam) return; /* get the escape clk, this is hard coded */ + clk_prepare_enable(cam->mipi_clk); tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12; - + clk_disable_unprepare(cam->mipi_clk); /* * dphy[2] - CSI2_DPHY6: * bit 0 ~ bit 7: CK Term Enable @@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data) return IRQ_RETVAL(handled); } -static void mcam_deinit_clk(struct mcam_camera *mcam) -{ - unsigned int i; - - for (i = 0; i < NR_MCAM_CLK; i++) { - if (!IS_ERR(mcam->clk[i])) { - if (mcam->clk[i]) - devm_clk_put(mcam->dev, mcam->clk[i]); - } - mcam->clk[i] = NULL; - } -} - static void mcam_init_clk(struct mcam_camera *mcam) { unsigned int i; @@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev) if (cam == NULL) return -ENOMEM; cam->pdev = pdev; - cam->mipi_clk = NULL; INIT_LIST_HEAD(&cam->devlist); mcam = &cam->mcam; @@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev) mcam->mclk_div = pdata->mclk_div; mcam->bus_type = pdata->bus_type; mcam->dphy = pdata->dphy; + if (mcam->bus_type == V4L2_MBUS_CSI2) { + cam->mipi_clk = devm_clk_get(mcam->dev, "mipi"); + if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)) + return PTR_ERR(cam->mipi_clk); + } mcam->mipi_enabled = false; mcam->lane = pdata->lane; mcam->chip_id = MCAM_ARMADA610; @@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev) */ ret = mmpcam_power_up(mcam); if (ret) - goto out_deinit_clk; + return ret; ret = mccic_register(mcam); if (ret) goto out_power_down; @@ -469,8 +449,6 @@ out_unregister: mccic_shutdown(mcam); out_power_down: mmpcam_power_down(mcam); -out_deinit_clk: - mcam_deinit_clk(mcam); return ret; } @@ -478,18 +456,10 @@ out_deinit_clk: static int mmpcam_remove(struct mmp_camera *cam) { struct mcam_camera *mcam = &cam->mcam; - struct mmp_camera_platform_data *pdata; mmpcam_remove_device(cam); mccic_shutdown(mcam); mmpcam_power_down(mcam); - pdata = cam->pdev->dev.platform_data; - gpio_free(pdata->sensor_reset_gpio); - gpio_free(pdata->sensor_power_gpio); - mcam_deinit_clk(mcam); - iounmap(cam->power_regs); - iounmap(mcam->regs); - kfree(cam); return 0; } diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 1c3608039663..561bce8ffb1b 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp) * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in * resume(), and the the pipelines are restarted in complete(). 
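The marvell-ccic/mmp-driver hunks above move the devm_clk_get(mcam->dev, "mipi") lookup from the power-up path into probe and delete the explicit devm_clk_put()/mcam_deinit_clk() teardown: a devm_-managed clock reference is released automatically when the device is unbound, so only the prepare/enable state needs explicit balancing. A minimal sketch of the pattern; example_probe is purely illustrative, while the "mipi" consumer name comes from the hunks above:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *mipi_clk;
	int ret;

	/* reference is tied to the device lifetime; no devm_clk_put() needed */
	mipi_clk = devm_clk_get(&pdev->dev, "mipi");
	if (IS_ERR(mipi_clk))
		return PTR_ERR(mipi_clk);

	/* enable only around the code that actually needs the clock rate */
	ret = clk_prepare_enable(mipi_clk);
	if (ret)
		return ret;
	dev_info(&pdev->dev, "mipi clk at %lu Hz\n", clk_get_rate(mipi_clk));
	clk_disable_unprepare(mipi_clk);

	return 0;
}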
* - * TODO: PM dependencies between the ISP and sensors are not modeled explicitly + * TODO: PM dependencies between the ISP and sensors are not modelled explicitly * yet. */ static int isp_pm_prepare(struct device *dev) diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index a908d006f527..f6304bb074f5 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format) if (subdev == NULL) return -EINVAL; - mutex_lock(&video->mutex); - fmt.pad = pad; fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; - ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); - if (ret == -ENOIOCTLCMD) - ret = -EINVAL; + mutex_lock(&video->mutex); + ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); mutex_unlock(&video->mutex); if (ret) diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h index 9319e93599ae..6ccc3f8c122a 100644 --- a/drivers/media/platform/s5p-mfc/regs-mfc.h +++ b/drivers/media/platform/s5p-mfc/regs-mfc.h @@ -382,7 +382,7 @@ #define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16 #define S5P_FIMV_R2H_CMD_ERR_RET 32 -/* Dummy definition for MFCv6 compatibilty */ +/* Dummy definition for MFCv6 compatibility */ #define S5P_FIMV_CODEC_H264_MVC_DEC -1 #define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1 #define S5P_FIMV_MFC_RESET -1 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 5f2c4ad6c2cb..e46067a57853 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx) frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); /* Copy timestamp / timecode from decoded src to dst and set - appropraite flags */ + appropriate flags */ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); list_for_each_entry(dst_buf, &ctx->dst_queue, list) { if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) { @@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, case MFCINST_FINISHING: case MFCINST_FINISHED: case MFCINST_RUNNING: - /* It is higly probable that an error occured + /* It is highly probable that an error occurred * while decoding a frame */ clear_work_bit(ctx); ctx->state = MFCINST_ERROR; @@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); switch (reason) { case S5P_MFC_R2H_CMD_ERR_RET: - /* An error has occured */ + /* An error has occurred */ if (ctx->state == MFCINST_RUNNING && s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >= dev->warn_start) @@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file) mutex_unlock(&dev->mfc_mutex); mfc_debug_leave(); return ret; - /* Deinit when failure occured */ + /* Deinit when failure occurred */ err_queue_init: if (dev->num_inst == 1) s5p_mfc_deinit_hw(dev); @@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file) /* Mark context as idle */ clear_work_bit_irqsave(ctx); /* If instance was initialised then - * return instance and free reosurces */ + * return instance and free resources */ if (ctx->inst_no != MFC_NO_INSTANCE_SET) { mfc_debug(2, "Has to free instance\n"); ctx->state = MFCINST_RETURN_INST; set_work_bit_irqsave(ctx); s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); - /* Wait until instance is 
returned or timeout occured */ + /* Wait until instance is returned or timeout occurred */ if (s5p_mfc_wait_for_done_ctx (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) { s5p_mfc_clock_off(); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index 7cab6849fb5b..2475a3c9a0a6 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev) } else { /* In this case bank2 can point to the same address as bank1. - * Firmware will always occupy the beggining of this area so it is + * Firmware will always occupy the beginning of this area so it is * impossible having a video frame buffer with zero address. */ dev->bank2 = dev->bank1; } diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h index 04e6490a45be..fb2acc53112a 100644 --- a/drivers/media/platform/s5p-tv/mixer.h +++ b/drivers/media/platform/s5p-tv/mixer.h @@ -65,7 +65,7 @@ struct mxr_format { int num_subframes; /** specifies to which subframe belong given plane */ int plane2subframe[MXR_MAX_PLANES]; - /** internal code, driver dependant */ + /** internal code, driver dependent */ unsigned long cookie; }; diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c index 641b1f071e06..81b97db111d8 100644 --- a/drivers/media/platform/s5p-tv/mixer_video.c +++ b/drivers/media/platform/s5p-tv/mixer_video.c @@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh, mutex_lock(&mdev->mutex); /* timings change cannot be done while there is an entity - * dependant on output configuration + * dependent on output configuration */ if (mdev->n_output > 0) { mutex_unlock(&mdev->mutex); @@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm) mutex_lock(&mdev->mutex); /* standard change cannot be done while there is an entity - * dependant on output configuration + * dependent on output configuration */ if (mdev->n_output > 0) { mutex_unlock(&mdev->mutex); diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index 6769193c7c7b..74ce8b6b79fa 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c @@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd) if (ctrlclock & LCLK_EN) CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); - /* select bus endianess */ + /* select bus endianness */ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); fmt = xlate->host_fmt; diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index 6a74ce040d28..ccdadd623a3a 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c @@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) desc = dmaengine_prep_slave_sg(fh->chan, buf->sg, sg_elems, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + DMA_PREP_INTERRUPT); if (!desc) { spin_lock_irq(&fh->queue_lock); list_del_init(&vb->queue); diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c index 1d3f11965196..2d4e73b45c5e 100644 --- a/drivers/media/platform/vivi.c +++ b/drivers/media/platform/vivi.c @@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i) return 0; } -/* timeperframe is arbitrary and continous */ +/* 
timeperframe is arbitrary and continuous */ static int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival) { @@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv, fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; - /* fill in stepwise (step=1.0 is requred by V4L2 spec) */ + /* fill in stepwise (step=1.0 is required by V4L2 spec) */ fival->stepwise.min = tpf_min; fival->stepwise.max = tpf_max; fival->stepwise.step = (struct v4l2_fract) {1, 1}; diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 1c9e771aa15c..d16bf0f41e24 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct vsp1_device *vsp1) * Increment the VSP1 reference count and initialize the device if the first * reference is taken. * - * Return a pointer to the VSP1 device or NULL if an error occured. + * Return a pointer to the VSP1 device or NULL if an error occurred. */ struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1) { diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 714c53ef6c11..4b0ac07af662 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf) /* ... and the buffers queue... */ video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev); - if (IS_ERR(video->alloc_ctx)) + if (IS_ERR(video->alloc_ctx)) { + ret = PTR_ERR(video->alloc_ctx); goto error; + } video->queue.type = video->type; video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index 3db8a8cfe1a8..050b3bb96fec 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } -#ifdef CONFIG_PM -static void shark_resume_leds(struct shark_device *shark) +static inline void shark_resume_leds(struct shark_device *shark) { if (test_bit(BLUE_IS_PULSE, &shark->brightness_new)) set_bit(BLUE_PULSE_LED, &shark->brightness_new); @@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark) set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } -#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index d86d90dab8bf..8654e0dc5c95 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark) cancel_work_sync(&shark->led_work); } -#ifdef CONFIG_PM -static void shark_resume_leds(struct shark_device *shark) +static inline void shark_resume_leds(struct shark_device *shark) { int i; @@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark) schedule_work(&shark->led_work); } -#endif #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 9c9084cb99f7..2fd9009f8663 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -268,8 +268,8 @@ struct si476x_radio; * * @tune_freq: Tune chip to a specific 
frequency * @seek_start: Star station seeking - * @rsq_status: Get Recieved Signal Quality(RSQ) status - * @rds_blckcnt: Get recived RDS blocks count + * @rsq_status: Get Received Signal Quality(RSQ) status + * @rds_blckcnt: Get received RDS blocks count * @phase_diversity: Change phase diversity mode of the tuner * @phase_div_status: Get phase diversity mode status * @acf_status: Get the status of Automatically Controlled diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 036e2f54f4db..3ed1f5669f79 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c @@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv, So we keep it as-is. */ return -EINVAL; } - clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); + freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); tea5764_power_up(radio); tea5764_tune(radio, (freq * 125) / 2); return 0; diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 69e3245a58a0..a9319a24c7ef 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c @@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen if (f->tuner != 0) return -EINVAL; - clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); + freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ); pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL; i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM; i2cmsg[1] = (pll >> 8) & 0xff; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 72e3fa652481..f329485c6629 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates * 0x688301b7 and the right one 0x688481b7. All other keys generate * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with - * reversed endianess. Extract direction from buffer, rotate endianess, + * reversed endianness. Extract direction from buffer, rotate endianness, * adjust sign and feed the values into stabilize(). The resulting codes * will be 0x01008000, 0x01007F00, which match the newer devices. */ diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 094484fac94c..a5d4f883d053 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -118,7 +118,7 @@ static int debug; #define RR3_IR_IO_LENGTH_FUZZ 0x04 /* Timeout for end of signal detection */ #define RR3_IR_IO_SIG_TIMEOUT 0x05 -/* Minumum value for pause recognition. */ +/* Minimum value for pause recognition. */ #define RR3_IR_IO_MIN_PAUSE 0x06 /* Clock freq. of EZ-USB chip */ diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c index 2e1a02e360ff..20cca405bf45 100644 --- a/drivers/media/tuners/mt2063.c +++ b/drivers/media/tuners/mt2063.c @@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state, * DNC Output is selected, the other is always off) * * @state: ptr to mt2063_state structure - * @Mode: desired reciever delivery system + * @Mode: desired receiver delivery system * * Note: Register cache must be valid for it to work */ @@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe, /* * As defined on EN 300 429, the DVB-C roll-off factor is 0.15. 
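The radio-tea5764 and tef6862 hunks above fix a silent no-op: clamp() is an expression that yields the bounded value and never modifies its argument, so the result has to be assigned back. A minimal illustration; clamp_example and the numeric bounds are made up for the sketch:

#include <linux/kernel.h>

static u32 clamp_example(u32 freq)
{
	clamp(freq, 100000U, 2000000U);		/* no effect: result discarded */
	freq = clamp(freq, 100000U, 2000000U);	/* freq now lies in [100000, 2000000] */
	return freq;
}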
- * So, the amount of the needed bandwith is given by: + * So, the amount of the needed bandwidth is given by: * Bw = Symbol_rate * (1 + 0.15) * As such, the maximum symbol rate supported by 6 MHz is given by: * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h index 74dc46a71f64..7e4798783db7 100644 --- a/drivers/media/tuners/tuner-xc2028-types.h +++ b/drivers/media/tuners/tuner-xc2028-types.h @@ -119,7 +119,7 @@ #define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B) #define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B) -/* To preserve backward compatibilty, +/* To preserve backward compatibility, (std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported */ diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index e9d017bea377..528cce958a82 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1412,8 +1412,8 @@ err_v4l2: usb_set_intfdata(interface, NULL); err_if: usb_put_dev(udev); - kfree(dev); clear_bit(dev->devno, &cx231xx_devused); + kfree(dev); return retval; } diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index c8fcd78425bd..8f9b2cea88f0 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) { u8 wbuf[MAX_XFER_SIZE]; u8 mbox = (reg >> 16) & 0xff; - struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL }; + struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL }; if (6 + len > sizeof(wbuf)) { dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n", @@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, } else { /* I2C */ u8 buf[MAX_XFER_SIZE]; - struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), + struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len, buf, msg[1].len, msg[1].buf }; if (5 + msg[0].len > sizeof(buf)) { dev_warn(&d->udev->dev, "%s: i2c xfer: len=%d is too big!\n", KBUILD_MODNAME, msg[0].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[1].len; @@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, } else { /* I2C */ u8 buf[MAX_XFER_SIZE]; - struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf, - 0, NULL }; + struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len, + buf, 0, NULL }; if (5 + msg[0].len > sizeof(buf)) { dev_warn(&d->udev->dev, "%s: i2c xfer: len=%d is too big!\n", KBUILD_MODNAME, msg[0].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[0].len; @@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, ret = -EOPNOTSUPP; } +unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) @@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = { /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 
2)", NULL) }, + { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, + &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, af9035_id_table); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index 2627553f7de1..08240e498451 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe) struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; int err; - /* exit if we didnt initialize the driver yet */ + /* exit if we didn't initialize the driver yet */ if (!state->chip_id) { mxl_debug("driver not yet initialized, exit."); goto fail; @@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe) struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id]; int err; - /* exit if we didnt initialize the driver yet */ + /* exit if we didn't initialize the driver yet */ if (!state->chip_id) { mxl_debug("driver not yet initialized, exit."); goto fail; diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index 40832a1aef6c..98d24aefb640 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c @@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, if (rxlen > 62) { err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)", device_addr); - txlen = 62; + rxlen = 62; } b[0] = I2C_SPEED_100KHZ_BIT; diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index fc5d60efd4ab..dd19c9ff76e0 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp) em28xx_videodbg("users=%d\n", dev->users); - mutex_lock(&dev->lock); vb2_fop_release(filp); + mutex_lock(&dev->lock); if (dev->users == 1) { /* the device is already disconnect, diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c index cb1e64ca59c9..cea8d7f51c3c 100644 --- a/drivers/media/usb/gspca/gl860/gl860.c +++ b/drivers/media/usb/gspca/gl860/gl860.c @@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, s32 nToSkip = sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); - /* Test only against 0202h, so endianess does not matter */ + /* Test only against 0202h, so endianness does not matter */ switch (*(s16 *) data) { case 0x0202: /* End of frame, start a new one */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c index cd79c180f67b..07529e5a0c56 100644 --- a/drivers/media/usb/gspca/pac207.c +++ b/drivers/media/usb/gspca/pac207.c @@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ - int len) /* interrput packet length */ + int len) /* interrupt packet length */ { int ret = -EINVAL; diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c index a91509643563..2fd1c5e31a0f 100644 --- a/drivers/media/usb/gspca/pac7302.c +++ b/drivers/media/usb/gspca/pac7302.c @@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev, #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ - int len) 
/* interrput packet length */ + int len) /* interrupt packet length */ { int ret = -EINVAL; u8 data0, data1; diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c index 1fc80af2a189..48234c9a8b6c 100644 --- a/drivers/media/usb/gspca/stk1135.c +++ b/drivers/media/usb/gspca/stk1135.c @@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev) /* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */ reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); + + /* wait a while for sensor to catch up */ + udelay(1000); } static void stk1135_camera_disable(struct gspca_dev *gspca_dev) diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c index 9c0827631b9c..7f94ec74282e 100644 --- a/drivers/media/usb/gspca/stv0680.c +++ b/drivers/media/usb/gspca/stv0680.c @@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev, struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; - /* Give the camera some time to settle, otherwise initalization will + /* Give the camera some time to settle, otherwise initialization will fail on hotplug, and yes it really needs a full second. */ msleep(1000); diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index a517d185febe..46c9f2229a18 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)}, {USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)}, {USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)}, + {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)}, {USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)}, {USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)}, {USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)}, diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c index 7b95d8e88a20..d3e1b6d8bf49 100644 --- a/drivers/media/usb/gspca/zc3xx.c +++ b/drivers/media/usb/gspca/zc3xx.c @@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev, #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ - int len) /* interrput packet length */ + int len) /* interrupt packet length */ { if (len == 8 && data[4] == 1) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 77bbf7889659..78c9bc8e7f56 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id /* Set the leds off */ pwc_set_leds(pdev, 0, 0); - /* Setup intial videomode */ + /* Setup initial videomode */ rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, V4L2_PIX_FMT_YUV420, 30, &compression, 1); if (rc) diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index 8a505a90d318..6222a4ab1e00 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c @@ -50,13 +50,8 @@ #define USBTV_ISOC_TRANSFERS 16 #define USBTV_ISOC_PACKETS 8 -#define USBTV_WIDTH 720 -#define USBTV_HEIGHT 480 - #define USBTV_CHUNK_SIZE 256 #define USBTV_CHUNK 240 -#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ - / 4 / USBTV_CHUNK) /* Chunk header. 
*/ #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ @@ -65,6 +60,27 @@ #define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15) #define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff) +#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL) + +/* parameters for supported TV norms */ +struct usbtv_norm_params { + v4l2_std_id norm; + int cap_width, cap_height; +}; + +static struct usbtv_norm_params norm_params[] = { + { + .norm = V4L2_STD_525_60, + .cap_width = 720, + .cap_height = 480, + }, + { + .norm = V4L2_STD_PAL, + .cap_width = 720, + .cap_height = 576, + } +}; + /* A single videobuf2 frame buffer. */ struct usbtv_buf { struct vb2_buffer vb; @@ -94,11 +110,38 @@ struct usbtv { USBTV_COMPOSITE_INPUT, USBTV_SVIDEO_INPUT, } input; + v4l2_std_id norm; + int width, height; + int n_chunks; int iso_size; unsigned int sequence; struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS]; }; +static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm) +{ + int i, ret = 0; + struct usbtv_norm_params *params = NULL; + + for (i = 0; i < ARRAY_SIZE(norm_params); i++) { + if (norm_params[i].norm & norm) { + params = &norm_params[i]; + break; + } + } + + if (params) { + usbtv->width = params->cap_width; + usbtv->height = params->cap_height; + usbtv->n_chunks = usbtv->width * usbtv->height + / 4 / USBTV_CHUNK; + usbtv->norm = params->norm; + } else + ret = -EINVAL; + + return ret; +} + static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size) { int ret; @@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input) return ret; } +static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm) +{ + int ret; + static const u16 pal[][2] = { + { USBTV_BASE + 0x001a, 0x0068 }, + { USBTV_BASE + 0x010e, 0x0072 }, + { USBTV_BASE + 0x010f, 0x00a2 }, + { USBTV_BASE + 0x0112, 0x00b0 }, + { USBTV_BASE + 0x0117, 0x0001 }, + { USBTV_BASE + 0x0118, 0x002c }, + { USBTV_BASE + 0x012d, 0x0010 }, + { USBTV_BASE + 0x012f, 0x0020 }, + { USBTV_BASE + 0x024f, 0x0002 }, + { USBTV_BASE + 0x0254, 0x0059 }, + { USBTV_BASE + 0x025a, 0x0016 }, + { USBTV_BASE + 0x025b, 0x0035 }, + { USBTV_BASE + 0x0263, 0x0017 }, + { USBTV_BASE + 0x0266, 0x0016 }, + { USBTV_BASE + 0x0267, 0x0036 } + }; + + static const u16 ntsc[][2] = { + { USBTV_BASE + 0x001a, 0x0079 }, + { USBTV_BASE + 0x010e, 0x0068 }, + { USBTV_BASE + 0x010f, 0x009c }, + { USBTV_BASE + 0x0112, 0x00f0 }, + { USBTV_BASE + 0x0117, 0x0000 }, + { USBTV_BASE + 0x0118, 0x00fc }, + { USBTV_BASE + 0x012d, 0x0004 }, + { USBTV_BASE + 0x012f, 0x0008 }, + { USBTV_BASE + 0x024f, 0x0001 }, + { USBTV_BASE + 0x0254, 0x005f }, + { USBTV_BASE + 0x025a, 0x0012 }, + { USBTV_BASE + 0x025b, 0x0001 }, + { USBTV_BASE + 0x0263, 0x001c }, + { USBTV_BASE + 0x0266, 0x0011 }, + { USBTV_BASE + 0x0267, 0x0005 } + }; + + ret = usbtv_configure_for_norm(usbtv, norm); + + if (!ret) { + if (norm & V4L2_STD_525_60) + ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc)); + else if (norm & V4L2_STD_PAL) + ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal)); + } + + return ret; +} + static int usbtv_setup_capture(struct usbtv *usbtv) { int ret; @@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv) { USBTV_BASE + 0x0284, 0x0088 }, { USBTV_BASE + 0x0003, 0x0004 }, - { USBTV_BASE + 0x001a, 0x0079 }, { USBTV_BASE + 0x0100, 0x00d3 }, - { USBTV_BASE + 0x010e, 0x0068 }, - { USBTV_BASE + 0x010f, 0x009c }, - { USBTV_BASE + 0x0112, 0x00f0 }, { USBTV_BASE + 0x0115, 0x0015 }, - { USBTV_BASE + 0x0117, 0x0000 }, - { USBTV_BASE + 
0x0118, 0x00fc }, - { USBTV_BASE + 0x012d, 0x0004 }, - { USBTV_BASE + 0x012f, 0x0008 }, { USBTV_BASE + 0x0220, 0x002e }, { USBTV_BASE + 0x0225, 0x0008 }, { USBTV_BASE + 0x024e, 0x0002 }, - { USBTV_BASE + 0x024f, 0x0001 }, - { USBTV_BASE + 0x0254, 0x005f }, - { USBTV_BASE + 0x025a, 0x0012 }, - { USBTV_BASE + 0x025b, 0x0001 }, - { USBTV_BASE + 0x0263, 0x001c }, - { USBTV_BASE + 0x0266, 0x0011 }, - { USBTV_BASE + 0x0267, 0x0005 }, { USBTV_BASE + 0x024e, 0x0002 }, { USBTV_BASE + 0x024f, 0x0002 }, }; @@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv) if (ret) return ret; + ret = usbtv_select_norm(usbtv, usbtv->norm); + if (ret) + return ret; + ret = usbtv_select_input(usbtv, usbtv->input); if (ret) return ret; @@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) frame_id = USBTV_FRAME_ID(chunk); odd = USBTV_ODD(chunk); chunk_no = USBTV_CHUNK_NO(chunk); - if (chunk_no >= USBTV_CHUNKS) + if (chunk_no >= usbtv->n_chunks) return; /* Beginning of a frame. */ @@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) usbtv->chunks_done++; /* Last chunk in a frame, signalling an end */ - if (odd && chunk_no == USBTV_CHUNKS-1) { + if (odd && chunk_no == usbtv->n_chunks-1) { int size = vb2_plane_size(&buf->vb, 0); enum vb2_buffer_state state = usbtv->chunks_done == - USBTV_CHUNKS ? + usbtv->n_chunks ? VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR; @@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv, static int usbtv_enum_input(struct file *file, void *priv, struct v4l2_input *i) { + struct usbtv *dev = video_drvdata(file); + switch (i->index) { case USBTV_COMPOSITE_INPUT: strlcpy(i->name, "Composite", sizeof(i->name)); @@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv, } i->type = V4L2_INPUT_TYPE_CAMERA; - i->std = V4L2_STD_525_60; + i->std = dev->vdev.tvnorms; return 0; } @@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv, static int usbtv_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { - f->fmt.pix.width = USBTV_WIDTH; - f->fmt.pix.height = USBTV_HEIGHT; + struct usbtv *usbtv = video_drvdata(file); + + f->fmt.pix.width = usbtv->width; + f->fmt.pix.height = usbtv->height; f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; f->fmt.pix.field = V4L2_FIELD_INTERLACED; - f->fmt.pix.bytesperline = USBTV_WIDTH * 2; + f->fmt.pix.bytesperline = usbtv->width * 2; f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height); f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; - f->fmt.pix.priv = 0; + return 0; } static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm) { - *norm = V4L2_STD_525_60; + struct usbtv *usbtv = video_drvdata(file); + *norm = usbtv->norm; return 0; } +static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm) +{ + int ret = -EINVAL; + struct usbtv *usbtv = video_drvdata(file); + + if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL)) + ret = usbtv_select_norm(usbtv, norm); + + return ret; +} + static int usbtv_g_input(struct file *file, void *priv, unsigned int *i) { struct usbtv *usbtv = video_drvdata(file); @@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i) return usbtv_select_input(usbtv, i); } -static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm) -{ - if (norm & V4L2_STD_525_60) - return 0; - return -EINVAL; -} - struct v4l2_ioctl_ops usbtv_ioctl_ops = { .vidioc_querycap = usbtv_querycap, .vidioc_enum_input = 
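The usbtv changes above drop the fixed USBTV_WIDTH/USBTV_HEIGHT/USBTV_CHUNKS constants and derive the geometry per TV norm instead, with n_chunks = width * height / 4 / USBTV_CHUNK; the same value later sizes the vb2 plane in usbtv_queue_setup(). A stand-alone sanity check of that arithmetic, using the norm_params entries shown above (u32 is approximated by unsigned int here):

#include <stdio.h>

#define USBTV_CHUNK 240         /* payload words per chunk, as in the driver */

struct norm { const char *name; int width, height; };

int main(void)
{
        /* Capture sizes set up by usbtv_configure_for_norm(). */
        static const struct norm norms[] = {
                { "NTSC (525/60)", 720, 480 },
                { "PAL",           720, 576 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(norms) / sizeof(norms[0]); i++) {
                int n_chunks = norms[i].width * norms[i].height
                               / 4 / USBTV_CHUNK;

                printf("%s: %d chunks/frame, plane size %zu bytes\n",
                       norms[i].name, n_chunks,
                       (size_t)USBTV_CHUNK * n_chunks * 2
                       * sizeof(unsigned int));
        }
        /* Prints 360 chunks / 691200 bytes for NTSC and 432 chunks /
         * 829440 bytes for PAL, i.e. width * height * 2 bytes of YUYV. */
        return 0;
}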
usbtv_enum_input, @@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq, const struct v4l2_format *v4l_fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { + struct usbtv *usbtv = vb2_get_drv_priv(vq); + if (*nbuffers < 2) *nbuffers = 2; *nplanes = 1; - sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); + sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32); return 0; } @@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf, return -ENOMEM; usbtv->dev = dev; usbtv->udev = usb_get_dev(interface_to_usbdev(intf)); + usbtv->iso_size = size; + + (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60); + spin_lock_init(&usbtv->buflock); mutex_init(&usbtv->v4l2_lock); mutex_init(&usbtv->vb2q_lock); @@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf, usbtv->vdev.release = video_device_release_empty; usbtv->vdev.fops = &usbtv_fops; usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops; - usbtv->vdev.tvnorms = V4L2_STD_525_60; + usbtv->vdev.tvnorms = USBTV_TV_STD; usbtv->vdev.queue = &usbtv->vb2q; usbtv->vdev.lock = &usbtv->v4l2_lock; set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 899cb6d1c4a4..898c208889cd 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample) * * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) * - * to avoid loosing precision in the division. Similarly, the host timestamp is + * to avoid losing precision in the division. Similarly, the host timestamp is * computed with * * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 60dcc0f3b32e..fb46790d0eca 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id) "Advanced Simple", "Core", "Simple Scalable", - "Advanced Coding Efficency", + "Advanced Coding Efficiency", NULL, }; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index b19b306c8f7f..0edc165f418d 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) } /** + * __setup_lengths() - setup initial lengths for every plane in + * every buffer on the queue + */ +static void __setup_lengths(struct vb2_queue *q, unsigned int n) +{ + unsigned int buffer, plane; + struct vb2_buffer *vb; + + for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { + vb = q->bufs[buffer]; + if (!vb) + continue; + + for (plane = 0; plane < vb->num_planes; ++plane) + vb->v4l2_planes[plane].length = q->plane_sizes[plane]; + } +} + +/** * __setup_offsets() - setup unique offsets ("cookies") for every plane in * every buffer on the queue */ @@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n) continue; for (plane = 0; plane < vb->num_planes; ++plane) { - vb->v4l2_planes[plane].length = q->plane_sizes[plane]; vb->v4l2_planes[plane].m.mem_offset = off; dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", @@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, q->bufs[q->num_buffers + buffer] = vb; } + 
__setup_lengths(q, buffer); if (memory == V4L2_MEMORY_MMAP) __setup_offsets(q, buffer); @@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) return -EINVAL; } - if (eb->flags & ~O_CLOEXEC) { - dprintk(1, "Queue does support only O_CLOEXEC flag\n"); + if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) { + dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n"); return -EINVAL; } @@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) vb_plane = &vb->planes[eb->plane]; - dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); + dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE); if (IS_ERR_OR_NULL(dbuf)) { dprintk(1, "Failed to export buffer %d, plane %d\n", eb->index, eb->plane); return -EINVAL; } - ret = dma_buf_fd(dbuf, eb->flags); + ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE); if (ret < 0) { dprintk(3, "buffer %d, plane %d failed to export (%d)\n", eb->index, eb->plane, ret); diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 646f08f4f504..33d3871d1e13 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) return sgt; } -static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) +static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags) { struct vb2_dc_buf *buf = buf_priv; struct dma_buf *dbuf; @@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) if (WARN_ON(!buf->sgt_base)) return NULL; - dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0); + dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags); if (IS_ERR(dbuf)) return NULL; diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 2f860543912c..0d3a8ffe47a3 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, buf->pages = kzalloc(buf->num_pages * sizeof(struct page *), GFP_KERNEL); if (!buf->pages) - return NULL; + goto userptr_fail_alloc_pages; num_pages_from_user = get_user_pages(current, current->mm, vaddr & PAGE_MASK, @@ -204,6 +204,7 @@ userptr_fail_get_user_pages: while (--num_pages_from_user >= 0) put_page(buf->pages[num_pages_from_user]); kfree(buf->pages); +userptr_fail_alloc_pages: kfree(buf); return NULL; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 62a60caa5d1f..dd671582c9a1 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -32,7 +32,7 @@ config MFD_AS3722 select MFD_CORE select REGMAP_I2C select REGMAP_IRQ - depends on I2C && OF + depends on I2C=y && OF help The ams AS3722 is a compact system PMU suitable for mobile phones, tablets etc. 
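With the vb2_expbuf() change above, user space may pass an access mode (the O_ACCMODE bits) alongside O_CLOEXEC when exporting a buffer; the mode is forwarded to the allocator's get_dmabuf()/dma_buf_export() and masked out again before dma_buf_fd(). A hedged user-space sketch of how a caller might use this; the device node and buffer index are placeholders, and MMAP buffers are assumed to have been allocated with VIDIOC_REQBUFS beforehand.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export vb2 buffer 'index' (plane 0) of a capture queue as a dma-buf fd. */
static int export_buffer(int vfd, unsigned int index)
{
        struct v4l2_exportbuffer expbuf;

        memset(&expbuf, 0, sizeof(expbuf));
        expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        expbuf.index = index;
        expbuf.plane = 0;
        /* O_CLOEXEC was already accepted; the patch additionally allows
         * requesting an access mode such as O_RDWR for the new fd. */
        expbuf.flags = O_CLOEXEC | O_RDWR;

        if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0) {
                perror("VIDIOC_EXPBUF");
                return -1;
        }
        return expbuf.fd;
}

int main(void)
{
        int vfd = open("/dev/video0", O_RDWR);  /* placeholder node */
        int dmabuf_fd;

        if (vfd < 0)
                return 1;
        dmabuf_fd = export_buffer(vfd, 0);
        if (dmabuf_fd >= 0)
                printf("exported dma-buf fd %d\n", dmabuf_fd);
        return 0;
}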
It has 4 DC/DC step-down regulators, 3 DC/DC step-down diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index da1c6566d93d..37edf9e989b0 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -506,7 +506,7 @@ static struct lpc_ich_info lpc_chipset_info[] = { .iTCO_version = 2, }, [LPC_WPT_LP] = { - .name = "Lynx Point_LP", + .name = "Wildcat Point_LP", .iTCO_version = 2, }, }; diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index 34c18fb8c089..54cc25546592 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = { int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest) { - return regmap_read(sec_pmic->regmap, reg, dest); + return regmap_read(sec_pmic->regmap_pmic, reg, dest); } EXPORT_SYMBOL_GPL(sec_reg_read); int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) { - return regmap_bulk_read(sec_pmic->regmap, reg, buf, count); + return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count); } EXPORT_SYMBOL_GPL(sec_bulk_read); int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value) { - return regmap_write(sec_pmic->regmap, reg, value); + return regmap_write(sec_pmic->regmap_pmic, reg, value); } EXPORT_SYMBOL_GPL(sec_reg_write); int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf) { - return regmap_raw_write(sec_pmic->regmap, reg, buf, count); + return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count); } EXPORT_SYMBOL_GPL(sec_bulk_write); int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask) { - return regmap_update_bits(sec_pmic->regmap, reg, mask, val); + return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val); } EXPORT_SYMBOL_GPL(sec_reg_update); @@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = { .cache_type = REGCACHE_FLAT, }; +static const struct regmap_config sec_rtc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + #ifdef CONFIG_OF /* * Only the common platform data elements for s5m8767 are parsed here from the @@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c, break; } - sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap); - if (IS_ERR(sec_pmic->regmap)) { - ret = PTR_ERR(sec_pmic->regmap); + sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap); + if (IS_ERR(sec_pmic->regmap_pmic)) { + ret = PTR_ERR(sec_pmic->regmap_pmic); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); return ret; @@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c, sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); i2c_set_clientdata(sec_pmic->rtc, sec_pmic); + sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, + &sec_rtc_regmap_config); + if (IS_ERR(sec_pmic->regmap_rtc)) { + ret = PTR_ERR(sec_pmic->regmap_rtc); + dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n", + ret); + return ret; + } + if (pdata && pdata->cfg_pmic_irq) pdata->cfg_pmic_irq(); diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c index 0dd84e99081e..b441b1be27cb 100644 --- a/drivers/mfd/sec-irq.c +++ b/drivers/mfd/sec-irq.c @@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) switch (type) { case S5M8763X: - ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, sec_pmic->irq_base, &s5m8763_irq_chip, &sec_pmic->irq_data); break; case S5M8767X: - ret = 
regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, sec_pmic->irq_base, &s5m8767_irq_chip, &sec_pmic->irq_data); break; case S2MPS11X: - ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq, + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, sec_pmic->irq_base, &s2mps11_irq_chip, &sec_pmic->irq_data); diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c index 71e3e0c5bf73..a5424579679c 100644 --- a/drivers/mfd/ti-ssp.c +++ b/drivers/mfd/ti-ssp.c @@ -32,6 +32,7 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/sched.h> #include <linux/mfd/core.h> #include <linux/mfd/ti_ssp.h> @@ -409,7 +410,6 @@ static int ti_ssp_probe(struct platform_device *pdev) cells[id].id = id; cells[id].name = data->dev_name; cells[id].platform_data = data->pdata; - cells[id].data_size = data->pdata_size; } error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL); diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 08b18f3f5264..9e2b985293fc 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; dma_addr_t dst, src; - unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | - DMA_COMPL_SKIP_SRC_UNMAP; + unsigned long dma_flags = 0; dst_sg = buf->vb.sglist; dst_nents = buf->vb.sglen; diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 0e8df41aaf14..2cf2bbc0b927 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev) { char name[ENCLOSURE_NAME_SIZE]; + /* + * In odd circumstances, like multipath devices, something else may + * already have removed the links, so check for this condition first. 
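The sec-core rework above splits register access into a regmap for the main PMIC address and a second regmap for the RTC block, which lives behind another I2C address reached through a dummy client. A condensed sketch of that two-regmap pattern, using the same i2c_new_dummy() and devm_regmap_init_i2c() calls as the hunk; the structure and address value here are illustrative, not the driver's real ones.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

#define RTC_SLAVE_ADDR  0x0c    /* secondary address; value is illustrative */

static const struct regmap_config rtc_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
};

struct pmic_chip {
        struct regmap *regmap_pmic;     /* main PMIC registers */
        struct regmap *regmap_rtc;      /* RTC block at the second address */
        struct i2c_client *rtc;
};

static int pmic_setup_regmaps(struct i2c_client *i2c, struct pmic_chip *chip,
                              const struct regmap_config *pmic_cfg)
{
        chip->regmap_pmic = devm_regmap_init_i2c(i2c, pmic_cfg);
        if (IS_ERR(chip->regmap_pmic))
                return PTR_ERR(chip->regmap_pmic);

        /* The RTC registers sit behind a second slave address on the same
         * adapter, so a dummy client is created for them. */
        chip->rtc = i2c_new_dummy(i2c->adapter, RTC_SLAVE_ADDR);
        if (!chip->rtc)
                return -ENODEV;

        chip->regmap_rtc = devm_regmap_init_i2c(chip->rtc, &rtc_regmap_config);
        if (IS_ERR(chip->regmap_rtc))
                return PTR_ERR(chip->regmap_rtc);

        return 0;
}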
+ */ + if (!cdev->dev->kobj.sd) + return; + enclosure_link_name(cdev, name); sysfs_remove_link(&cdev->dev->kobj, name); sysfs_remove_link(&cdev->cdev.kobj, "device"); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 6c0fde55270d..66f411a6e8ea 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -109,9 +109,12 @@ #define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */ #define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */ -#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */ +#define MEI_DEV_ID_LPT_H 0x8C3A /* Lynx Point H */ #define MEI_DEV_ID_LPT_W 0x8D3A /* Lynx Point - Wellsburg */ #define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */ +#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ + +#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index b96205aece0c..2cab3c0a6805 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -76,9 +76,11 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)}, /* required last entry */ {0, } diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c index 8aa42e738acc..653799b96bfa 100644 --- a/drivers/misc/mic/card/mic_virtio.c +++ b/drivers/misc/mic/card/mic_virtio.c @@ -154,14 +154,14 @@ static void mic_reset_inform_host(struct virtio_device *vdev) { struct mic_vdev *mvdev = to_micvdev(vdev); struct mic_device_ctrl __iomem *dc = mvdev->dc; - int retry = 100, i; + int retry; iowrite8(0, &dc->host_ack); iowrite8(1, &dc->vdev_reset); mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); /* Wait till host completes all card accesses and acks the reset */ - for (i = retry; i--;) { + for (retry = 100; retry--;) { if (ioread8(&dc->host_ack)) break; msleep(100); @@ -187,11 +187,12 @@ static void mic_reset(struct virtio_device *vdev) /* * The virtio_ring code calls this API when it wants to notify the Host. 
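The mic_reset_inform_host() change above (and the similar loops later in the series) folds the retry budget into the loop variable itself, so the count that remains after the wait is meaningful and can be logged. A condensed sketch of that bounded-polling idiom, assuming an ioremapped acknowledge byte at 'ack':

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll a device acknowledge byte for up to 100 * 100 ms.  Returns true
 * as soon as the device acks, false if the retry budget runs out. */
static bool wait_for_device_ack(u8 __iomem *ack)
{
        int retry;

        for (retry = 100; retry--;) {
                if (ioread8(ack))
                        return true;
                msleep(100);
        }
        return false;
}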
*/ -static void mic_notify(struct virtqueue *vq) +static bool mic_notify(struct virtqueue *vq) { struct mic_vdev *mvdev = vq->priv; mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); + return true; } static void mic_del_vq(struct virtqueue *vq, int n) @@ -247,17 +248,17 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev, /* First assign the vring's allocated in host memory */ vqconfig = mic_vq_config(mvdev->desc) + index; memcpy_fromio(&config, vqconfig, sizeof(config)); - _vr_size = vring_size(config.num, MIC_VIRTIO_RING_ALIGN); + _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); - va = mic_card_map(mvdev->mdev, config.address, vr_size); + va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size); if (!va) return ERR_PTR(-ENOMEM); mvdev->vr[index] = va; memset_io(va, 0x0, _vr_size); - vq = vring_new_virtqueue(index, - config.num, MIC_VIRTIO_RING_ALIGN, vdev, - false, - va, mic_notify, callback, name); + vq = vring_new_virtqueue(index, le16_to_cpu(config.num), + MIC_VIRTIO_RING_ALIGN, vdev, false, + (void __force *)va, mic_notify, callback, + name); if (!vq) { err = -ENOMEM; goto unmap; @@ -272,7 +273,8 @@ static struct virtqueue *mic_find_vq(struct virtio_device *vdev, /* Allocate and reassign used ring now */ mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + - sizeof(struct vring_used_elem) * config.num); + sizeof(struct vring_used_elem) * + le16_to_cpu(config.num)); used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(mvdev->used_size[index])); if (!used) { @@ -309,7 +311,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, { struct mic_vdev *mvdev = to_micvdev(vdev); struct mic_device_ctrl __iomem *dc = mvdev->dc; - int i, err, retry = 100; + int i, err, retry; /* We must have this many virtqueues. */ if (nvqs > ioread8(&mvdev->desc->num_vq)) @@ -331,7 +333,7 @@ static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, * rings have been re-assigned. 
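The mic_find_vq() hunk above now converts the shared vring configuration fields from their little-endian on-the-wire form (le16_to_cpu()/le64_to_cpu()) before sizing and mapping the ring. A reduced sketch of that read-and-convert step; the struct below is a stand-in for the two struct mic_vqconfig fields used in the diff, not the real MIC definition.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Stand-in for the fields of struct mic_vqconfig touched in the hunk. */
struct vqconfig_le {
        __le64 address;
        __le16 num;
};

/* Copy one config entry out of device memory, then convert the
 * little-endian fields to host endianness before using them. */
static void read_vqconfig(struct vqconfig_le __iomem *vqconfig,
                          u64 *address, u16 *num)
{
        struct vqconfig_le config;

        memcpy_fromio(&config, vqconfig, sizeof(config));
        *address = le64_to_cpu(config.address);
        *num = le16_to_cpu(config.num);
}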
*/ mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); - for (i = retry; i--;) { + for (retry = 100; retry--;) { if (!ioread8(&dc->used_address_updated)) break; msleep(100); @@ -519,8 +521,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove) struct device *dev; int ret; - for (i = mic_aligned_size(struct mic_bootparam); - i < MIC_DP_SIZE; i += mic_total_desc_size(d)) { + for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE; + i += mic_total_desc_size(d)) { d = mdrv->dp + i; dc = (void __iomem *)d + mic_aligned_desc_size(d); /* @@ -539,7 +541,8 @@ static void mic_scan_devices(struct mic_driver *mdrv, bool remove) continue; /* device already exists */ - dev = device_find_child(mdrv->dev, d, mic_match_desc); + dev = device_find_child(mdrv->dev, (void __force *)d, + mic_match_desc); if (dev) { if (remove) iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h index 2c5c22c93ba8..d0407ba53bb7 100644 --- a/drivers/misc/mic/card/mic_virtio.h +++ b/drivers/misc/mic/card/mic_virtio.h @@ -42,8 +42,8 @@ static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) { - return mic_aligned_size(*desc) - + ioread8(&desc->num_vq) * mic_aligned_size(struct mic_vqconfig) + return sizeof(*desc) + + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) + ioread8(&desc->feature_len) * 2 + ioread8(&desc->config_len); } @@ -67,8 +67,7 @@ mic_vq_configspace(struct mic_device_desc __iomem *desc) } static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) { - return mic_aligned_desc_size(desc) + - mic_aligned_size(struct mic_device_ctrl); + return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); } int mic_devices_init(struct mic_driver *mdrv); diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 7558d9186438..b75c6b5cc20f 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -62,7 +62,7 @@ void mic_bootparam_init(struct mic_device *mdev) { struct mic_bootparam *bootparam = mdev->dp; - bootparam->magic = MIC_MAGIC; + bootparam->magic = cpu_to_le32(MIC_MAGIC); bootparam->c2h_shutdown_db = mdev->shutdown_db; bootparam->h2c_shutdown_db = -1; bootparam->h2c_config_db = -1; diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index 5b8494bd1e00..e04bb4fe6823 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c @@ -41,7 +41,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, * We are copying from IO below an should ideally use something * like copy_to_user_fromio(..) if it existed. */ - if (copy_to_user(ubuf, dbuf, len)) { + if (copy_to_user(ubuf, (void __force *)dbuf, len)) { err = -EFAULT; dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err); @@ -66,7 +66,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, * We are copying to IO below and should ideally use something * like copy_from_user_toio(..) if it existed. 
*/ - if (copy_from_user(dbuf, ubuf, len)) { + if (copy_from_user((void __force *)dbuf, ubuf, len)) { err = -EFAULT; dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err); @@ -293,7 +293,7 @@ static void mic_virtio_init_post(struct mic_vdev *mvdev) continue; } mvdev->mvr[i].vrh.vring.used = - mvdev->mdev->aper.va + + (void __force *)mvdev->mdev->aper.va + le64_to_cpu(vqconfig[i].used_address); } @@ -378,7 +378,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev, void __user *argp) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); - int ret = 0, retry = 100, i; + int ret = 0, retry, i; struct mic_bootparam *bootparam = mvdev->mdev->dp; s8 db = bootparam->h2c_config_db; @@ -401,7 +401,7 @@ int mic_virtio_config_change(struct mic_vdev *mvdev, mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; mvdev->mdev->ops->send_intr(mvdev->mdev, db); - for (i = retry; i--;) { + for (retry = 100; retry--;) { ret = wait_event_timeout(wake, mvdev->dc->guest_ack, msecs_to_jiffies(100)); if (ret) @@ -467,7 +467,7 @@ static int mic_copy_dp_entry(struct mic_vdev *mvdev, } /* Find the first free device page entry */ - for (i = mic_aligned_size(struct mic_bootparam); + for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE - mic_total_desc_size(dd_config); i += mic_total_desc_size(devp)) { devp = mdev->dp + i; @@ -525,6 +525,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev, char irqname[10]; struct mic_bootparam *bootparam = mdev->dp; u16 num; + dma_addr_t vr_addr; mutex_lock(&mdev->mic_mutex); @@ -559,17 +560,16 @@ int mic_virtio_add_device(struct mic_vdev *mvdev, } vr->len = vr_size; vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); - vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i; - vqconfig[i].address = mic_map_single(mdev, - vr->va, vr_size); - if (mic_map_error(vqconfig[i].address)) { + vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); + vr_addr = mic_map_single(mdev, vr->va, vr_size); + if (mic_map_error(vr_addr)) { free_pages((unsigned long)vr->va, get_order(vr_size)); ret = -ENOMEM; dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, ret); goto err; } - vqconfig[i].address = cpu_to_le64(vqconfig[i].address); + vqconfig[i].address = cpu_to_le64(vr_addr); vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); ret = vringh_init_kern(&mvr->vrh, @@ -639,7 +639,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev) struct mic_vdev *tmp_mvdev; struct mic_device *mdev = mvdev->mdev; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); - int i, ret, retry = 100; + int i, ret, retry; struct mic_vqconfig *vqconfig; struct mic_bootparam *bootparam = mdev->dp; s8 db; @@ -652,16 +652,16 @@ void mic_virtio_del_device(struct mic_vdev *mvdev) "Requesting hot remove id %d\n", mvdev->virtio_id); mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; mdev->ops->send_intr(mdev, db); - for (i = retry; i--;) { + for (retry = 100; retry--;) { ret = wait_event_timeout(wake, mvdev->dc->guest_ack, msecs_to_jiffies(100)); if (ret) break; } dev_dbg(mdev->sdev->parent, - "Device id %d config_change %d guest_ack %d\n", + "Device id %d config_change %d guest_ack %d retry %d\n", mvdev->virtio_id, mvdev->dc->config_change, - mvdev->dc->guest_ack); + mvdev->dc->guest_ack, retry); mvdev->dc->config_change = 0; mvdev->dc->guest_ack = 0; skip_hot_remove: diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c index 81e9541b784c..0dfa8a81436e 100644 --- a/drivers/misc/mic/host/mic_x100.c +++ b/drivers/misc/mic/host/mic_x100.c @@ -397,8 +397,8 @@ 
mic_x100_load_ramdisk(struct mic_device *mdev) * so copy over the ramdisk @ 128M. */ memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size); - iowrite32(cpu_to_le32(mdev->bootaddr << 1), &bp->hdr.ramdisk_image); - iowrite32(cpu_to_le32(fw->size), &bp->hdr.ramdisk_size); + iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image); + iowrite32(fw->size, &bp->hdr.ramdisk_size); release_firmware(fw); error: return rc; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index ef8956568c3a..157b570ba343 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -308,8 +308,7 @@ static void sdio_acpi_set_handle(struct sdio_func *func) struct mmc_host *host = func->card->host; u64 addr = (host->slotno << 16) | func->num; - ACPI_HANDLE_SET(&func->dev, - acpi_get_child(ACPI_HANDLE(host->parent), addr)); + acpi_preset_companion(&func->dev, ACPI_HANDLE(host->parent), addr); } #else static inline void sdio_acpi_set_handle(struct sdio_func *func) {} diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 0b10a9030f4e..98b6b6ef7e5c 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -22,6 +22,7 @@ #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/timer.h> +#include <linux/of.h> #include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -90,17 +91,6 @@ #define OMAP_MMC_CMDTYPE_AC 2 #define OMAP_MMC_CMDTYPE_ADTC 3 -#define OMAP_DMA_MMC_TX 21 -#define OMAP_DMA_MMC_RX 22 -#define OMAP_DMA_MMC2_TX 54 -#define OMAP_DMA_MMC2_RX 55 - -#define OMAP24XX_DMA_MMC2_TX 47 -#define OMAP24XX_DMA_MMC2_RX 48 -#define OMAP24XX_DMA_MMC1_TX 61 -#define OMAP24XX_DMA_MMC1_RX 62 - - #define DRIVER_NAME "mmci-omap" /* Specifies how often in millisecs to poll for card status changes @@ -1330,7 +1320,7 @@ static int mmc_omap_probe(struct platform_device *pdev) struct mmc_omap_host *host = NULL; struct resource *res; dma_cap_mask_t mask; - unsigned sig; + unsigned sig = 0; int i, ret = 0; int irq; @@ -1340,7 +1330,7 @@ static int mmc_omap_probe(struct platform_device *pdev) } if (pdata->nr_slots == 0) { dev_err(&pdev->dev, "no slots\n"); - return -ENXIO; + return -EPROBE_DEFER; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1407,19 +1397,20 @@ static int mmc_omap_probe(struct platform_device *pdev) host->dma_tx_burst = -1; host->dma_rx_burst = -1; - if (mmc_omap2()) - sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; - else - sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; - host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); + if (res) + sig = res->start; + host->dma_tx = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, &sig, &pdev->dev, "tx"); if (!host->dma_tx) dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", sig); - if (mmc_omap2()) - sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; - else - sig = host->id == 0 ? 
OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; - host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); + if (res) + sig = res->start; + host->dma_rx = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, &sig, &pdev->dev, "rx"); if (!host->dma_rx) dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", sig); @@ -1512,12 +1503,20 @@ static int mmc_omap_remove(struct platform_device *pdev) return 0; } +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id mmc_omap_match[] = { + { .compatible = "ti,omap2420-mmc", }, + { }, +}; +#endif + static struct platform_driver mmc_omap_driver = { .probe = mmc_omap_probe, .remove = mmc_omap_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mmc_omap_match), }, }; diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index d78a97d4153a..59f08c44abdb 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, dma_dev = host->dma_chan->device; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; phys_addr = dma_map_single(dma_dev->dev, p, len, dir); if (dma_mapping_error(dma_dev->dev, phys_addr)) { diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 3dc1a7564d87..8b2752263db9 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); - flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; - if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; dma_dst = host->data_pa; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 4cabdc9fda90..4b3aaa898a8b 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) { struct platform_device *pdev = info->pdev; - if (use_dma) { + if (info->use_dma) { pxa_free_dma(info->data_dma_ch); dma_free_coherent(&pdev->dev, info->buf_size, info->data_buff, info->data_buff_phys); @@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = { .compatible = "marvell,pxa3xx-nand", .data = (void *)PXA3XX_NAND_VARIANT_PXA, }, - { - .compatible = "marvell,armada370-nand", - .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, - }, {} }; MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4dd5ee2a34cc..398e299ee1bd 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4110,7 +4110,7 @@ static int bond_check_params(struct bond_params *params) if (!miimon) { pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); pr_warning("Forcing miimon to 100msec\n"); - miimon = 100; + miimon = BOND_DEFAULT_MIIMON; } } @@ -4147,7 +4147,7 @@ static int bond_check_params(struct bond_params *params) if (!miimon) { pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB 
load balancing\n"); pr_warning("Forcing miimon to 100msec\n"); - miimon = 100; + miimon = BOND_DEFAULT_MIIMON; } } @@ -4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params) (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { /* not complete check, but should be good enough to catch mistakes */ - __be32 ip = in_aton(arp_ip_target[i]); - if (!isdigit(arp_ip_target[i][0]) || ip == 0 || - ip == htonl(INADDR_BROADCAST)) { + __be32 ip; + if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || + IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", arp_ip_target[i]); arp_interval = 0; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 9a5223c7b4d1..ea6f640782b7 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode) return -EPERM; } - if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) { - pr_err("%s: %s mode is incompatible with arp monitoring.\n", - bond->dev->name, bond_mode_tbl[mode].modename); - return -EINVAL; + if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) { + pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n", + bond->dev->name, bond_mode_tbl[mode].modename); + /* disable arp monitoring */ + bond->params.arp_interval = 0; + /* set miimon to default value */ + bond->params.miimon = BOND_DEFAULT_MIIMON; + pr_info("%s: Setting MII monitoring interval to %d.\n", + bond->dev->name, bond->params.miimon); } /* don't cache arp_validate between modes */ diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index bc8fd362a5aa..0ae580bbc5db 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -523,9 +523,8 @@ static ssize_t bonding_store_arp_interval(struct device *d, ret = -EINVAL; goto out; } - if (bond->params.mode == BOND_MODE_ALB || - bond->params.mode == BOND_MODE_TLB) { - pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n", + if (BOND_NO_USES_ARP(bond->params.mode)) { + pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. 
Only MII monitoring is supported on %s.\n", bond->dev->name, bond->dev->name); ret = -EINVAL; goto out; @@ -603,15 +602,14 @@ static ssize_t bonding_store_arp_targets(struct device *d, return restart_syscall(); targets = bond->params.arp_targets; - newtarget = in_aton(buf + 1); + if (!in4_pton(buf + 1, -1, (u8 *)&newtarget, -1, NULL) || + IS_IP_TARGET_UNUSABLE_ADDRESS(newtarget)) { + pr_err("%s: invalid ARP target %pI4 specified for addition\n", + bond->dev->name, &newtarget); + goto out; + } /* look for adds */ if (buf[0] == '+') { - if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { - pr_err("%s: invalid ARP target %pI4 specified for addition\n", - bond->dev->name, &newtarget); - goto out; - } - if (bond_get_targets_ip(targets, newtarget) != -1) { /* dup */ pr_err("%s: ARP target %pI4 is already present\n", bond->dev->name, &newtarget); @@ -634,12 +632,6 @@ static ssize_t bonding_store_arp_targets(struct device *d, targets[ind] = newtarget; write_unlock_bh(&bond->lock); } else if (buf[0] == '-') { - if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { - pr_err("%s: invalid ARP target %pI4 specified for removal\n", - bond->dev->name, &newtarget); - goto out; - } - ind = bond_get_targets_ip(targets, newtarget); if (ind == -1) { pr_err("%s: unable to remove nonexistent ARP target %pI4.\n", @@ -701,6 +693,8 @@ static ssize_t bonding_store_downdelay(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (!(bond->params.miimon)) { pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", bond->dev->name); @@ -734,6 +728,7 @@ static ssize_t bonding_store_downdelay(struct device *d, } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, @@ -756,6 +751,8 @@ static ssize_t bonding_store_updelay(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); if (!(bond->params.miimon)) { pr_err("%s: Unable to set up delay as MII monitoring is disabled\n", bond->dev->name); @@ -789,6 +786,7 @@ static ssize_t bonding_store_updelay(struct device *d, } out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, @@ -1637,12 +1635,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d, char *buf) { struct bonding *bond = to_bond(d); - int packets_per_slave = bond->params.packets_per_slave; + unsigned int packets_per_slave = bond->params.packets_per_slave; if (packets_per_slave > 1) packets_per_slave = reciprocal_value(packets_per_slave); - return sprintf(buf, "%d\n", packets_per_slave); + return sprintf(buf, "%u\n", packets_per_slave); } static ssize_t bonding_store_packets_per_slave(struct device *d, diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 77a07a12e77f..a9f4f9f4d8ce 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -35,6 +35,8 @@ #define BOND_MAX_ARP_TARGETS 16 +#define BOND_DEFAULT_MIIMON 100 + #define IS_UP(dev) \ ((((dev)->flags & IFF_UP) == IFF_UP) && \ netif_running(dev) && \ @@ -55,6 +57,11 @@ ((mode) == BOND_MODE_TLB) || \ ((mode) == BOND_MODE_ALB)) +#define BOND_NO_USES_ARP(mode) \ + (((mode) == BOND_MODE_8023AD) || \ + ((mode) == BOND_MODE_TLB) || \ + ((mode) == BOND_MODE_ALB)) + #define TX_QUEUE_OVERRIDE(mode) \ (((mode) == BOND_MODE_ACTIVEBACKUP) || \ ((mode) == BOND_MODE_ROUNDROBIN)) @@ -63,6 +70,9 @@ (((mode) == BOND_MODE_TLB) || \ ((mode) == BOND_MODE_ALB)) 
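Both bonding hunks above replace in_aton() with in4_pton(), which actually rejects malformed strings, and route the usability test through the IS_IP_TARGET_UNUSABLE_ADDRESS() helper added to bonding.h just below. A trimmed-down sketch of that validation step, mirroring the calls used in the patch:

#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>

/* Parse a dotted-quad ARP target and reject addresses the bonding code
 * cannot usefully monitor (zero-network and limited-broadcast). */
static int parse_arp_target(const char *str, __be32 *target)
{
        __be32 ip;

        if (!in4_pton(str, -1, (u8 *)&ip, -1, NULL))
                return -EINVAL;         /* not a valid IPv4 address at all */

        if (ipv4_is_zeronet(ip) || ip == htonl(INADDR_BROADCAST))
                return -EINVAL;         /* parses, but unusable as a target */

        *target = ip;
        return 0;
}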
+#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \ + ((htonl(INADDR_BROADCAST) == a) || \ + ipv4_is_zeronet(a)) /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e3fc07cf2f62..77061eebb034 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode) return 0; } -static int c_can_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) +static int __c_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) { unsigned int reg_err_counter; struct c_can_priv *priv = netdev_priv(dev); - c_can_pm_runtime_get_sync(priv); - reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> ERR_CNT_REC_SHIFT; bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; + return 0; +} + +static int c_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct c_can_priv *priv = netdev_priv(dev); + int err; + + c_can_pm_runtime_get_sync(priv); + err = __c_can_get_berr_counter(dev, bec); c_can_pm_runtime_put_sync(priv); - return 0; + return err; } /* @@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev) if (!(val & (1 << (msg_obj_no - 1)))) { can_get_echo_skb(dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); + c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL); stats->tx_bytes += priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, 0)) & IF_MCONT_DLC_MASK; @@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev, if (unlikely(!skb)) return 0; - c_can_get_berr_counter(dev, &bec); + __c_can_get_berr_counter(dev, &bec); reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> ERR_CNT_RP_SHIFT; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index ae08cf129ebb..aaed97bee471 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev) dev_err(&pdev->dev, "no ipg clock defined\n"); return PTR_ERR(clk_ipg); } - clock_freq = clk_get_rate(clk_ipg); clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(clk_per)) { dev_err(&pdev->dev, "no per clock defined\n"); return PTR_ERR(clk_per); } + clock_freq = clk_get_rate(clk_per); } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 7164a999f50f..f17c3018b7c7 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) uint8_t isrc, status; int n = 0; - /* Shared interrupts and IRQ off? */ - if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF) - return IRQ_NONE; - if (priv->pre_irq) priv->pre_irq(priv); + /* Shared interrupts and IRQ off? 
*/ + if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF) + goto out; + while ((isrc = priv->read_reg(priv, SJA1000_IR)) && (n < SJA1000_MAX_IRQ)) { - n++; + status = priv->read_reg(priv, SJA1000_SR); /* check for absent controller due to hw unplug */ if (status == 0xFF && sja1000_is_absent(priv)) - return IRQ_NONE; + goto out; if (isrc & IRQ_WUI) netdev_warn(dev, "wakeup interrupt\n"); @@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) status = priv->read_reg(priv, SJA1000_SR); /* check for absent controller */ if (status == 0xFF && sja1000_is_absent(priv)) - return IRQ_NONE; + goto out; } } if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { @@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) if (sja1000_err(dev, isrc, status)) break; } + n++; } - +out: if (priv->post_irq) priv->post_irq(priv); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 50b853a79d77..46dfb1378c17 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev) if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); - if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, - 0, dev->name, dev)) + if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev)) return -EAGAIN; /* Initialize EMAC board */ @@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev) emac_shutdown(ndev); + free_irq(ndev->irq, ndev); + return 0; } diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5aa5e8146496..c3c4c266b846 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1388,6 +1388,9 @@ static int alx_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 4e01c57d8c8d..a1f66e2c9a86 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1376,7 +1376,6 @@ enum { BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, - BNX2X_SP_RTNL_TX_RESUME, }; struct bnx2x_prev_path_list { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dcafbda3e5be..ec96130533cc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2959,6 +2959,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->port.pmf = 0; + /* clear pending work in rtnl task */ + bp->sp_rtnl_state = 0; + smp_mb(); + /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); if (CNIC_LOADED(bp)) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d8828..fdace204b054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* ets may affect cmng configuration: reinit it in hw */ bnx2x_set_local_cmng(bp); - - set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - return; case 
BNX2X_DCBX_STATE_TX_RELEASED: DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e622cc1f96ff..814d0eca9b33 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -577,7 +577,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -614,7 +616,9 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -5231,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp) case EVENT_RING_OPCODE_STOP_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: @@ -9352,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset(bp, global); barrier(); + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -9706,11 +9714,10 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_pf_set_vfs_vlan(bp); - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_dcbx_stop_hw_tx(bp); - - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) bnx2x_dcbx_resume_hw_tx(bp); + } /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4cc..3efbb35267c8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2864,6 +2864,17 @@ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. 
*/ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + /* [R 9] Interrupt register #0 read */ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 0216d592d0ce..2e46c28fc601 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3114,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); + if (!IS_SRIOV(bp)) { + BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); + return -EINVAL; + } + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", num_vfs_param, BNX2X_NR_VIRTFN(bp)); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 9199adf32d33..efa8a151d789 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -152,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); *done = PFVF_STATUS_SUCCESS; - return 0; + return -EINVAL; } /* Write message address */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 00c5be8c55b8..f3dd93b4aeaa 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp) void (*write_op)(struct tg3 *, u32, u32); int i, err; + if (!pci_device_is_present(tp->pdev)) + return -ENODEV; + tg3_nvram_lock(tp); tg3_ape_lock(tp, TG3_APE_LOCK_GRC); @@ -10629,10 +10632,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) static ssize_t tg3_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(netdev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct tg3 *tp = dev_get_drvdata(dev); u32 temperature; spin_lock_bh(&tp->lock); @@ -10650,29 +10651,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, TG3_TEMP_MAX_OFFSET); -static struct attribute *tg3_attributes[] = { +static struct attribute *tg3_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, NULL }; - -static const struct attribute_group tg3_group = { - .attrs = tg3_attributes, -}; +ATTRIBUTE_GROUPS(tg3); static void tg3_hwmon_close(struct tg3 *tp) { if (tp->hwmon_dev) { hwmon_device_unregister(tp->hwmon_dev); tp->hwmon_dev = NULL; - sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); } } static void tg3_hwmon_open(struct tg3 *tp) { - int i, err; + int i; u32 size = 0; struct pci_dev *pdev = tp->pdev; struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; @@ -10690,18 +10687,11 @@ static void tg3_hwmon_open(struct tg3 *tp) if (!size) return; - /* Register hwmon sysfs hooks */ - err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); - if (err) { - dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n"); - return; - } - - tp->hwmon_dev = 
hwmon_device_register(&pdev->dev); + tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", + tp, tg3_groups); if (IS_ERR(tp->hwmon_dev)) { tp->hwmon_dev = NULL; dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); - sysfs_remove_group(&pdev->dev.kobj, &tg3_group); } } @@ -11594,10 +11584,11 @@ static int tg3_close(struct net_device *dev) memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); - tg3_power_down_prepare(tp); - - tg3_carrier_off(tp); + if (pci_device_is_present(tp->pdev)) { + tg3_power_down_prepare(tp); + tg3_carrier_off(tp); + } return 0; } @@ -13618,16 +13609,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, if (stmpconf.flags) return -EINVAL; - switch (stmpconf.tx_type) { - case HWTSTAMP_TX_ON: - tg3_flag_set(tp, TX_TSTAMP_EN); - break; - case HWTSTAMP_TX_OFF: - tg3_flag_clear(tp, TX_TSTAMP_EN); - break; - default: + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; - } switch (stmpconf.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -13689,6 +13673,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, tw32(TG3_RX_PTP_CTL, tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + tg3_flag_set(tp, TX_TSTAMP_EN); + else + tg3_flag_clear(tp, TX_TSTAMP_EN); + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? -EFAULT : 0; } @@ -16514,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Clear this out for sanity. */ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ + tw32(TG3PCI_REG_BASE_ADDR, 0); + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && @@ -17741,10 +17733,12 @@ static int tg3_suspend(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; tg3_reset_task_cancel(tp); tg3_phy_stop(tp); @@ -17786,6 +17780,8 @@ out: tg3_phy_start(tp); } +unlock: + rtnl_unlock(); return err; } @@ -17794,10 +17790,12 @@ static int tg3_resume(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; netif_device_attach(dev); @@ -17821,6 +17819,8 @@ out: if (!err) tg3_phy_start(tp); +unlock: + rtnl_unlock(); return err; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ecd2fb3ef695..6c9308850453 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -49,13 +49,15 @@ #include <asm/io.h> #include "cxgb4_uld.h" -#define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 4 -#define FW_VERSION_MICRO 0 +#define T4FW_VERSION_MAJOR 0x01 +#define T4FW_VERSION_MINOR 0x06 +#define T4FW_VERSION_MICRO 0x18 +#define T4FW_VERSION_BUILD 0x00 -#define FW_VERSION_MAJOR_T5 0 -#define FW_VERSION_MINOR_T5 0 -#define FW_VERSION_MICRO_T5 0 +#define T5FW_VERSION_MAJOR 0x01 +#define T5FW_VERSION_MINOR 0x08 +#define T5FW_VERSION_MICRO 0x1C +#define T5FW_VERSION_BUILD 0x00 #define CH_WARN(adap, fmt, ...) 
dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) @@ -240,6 +242,26 @@ struct pci_params { unsigned char width; }; +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_FPGA 0x100 +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, +}; + struct adapter_params { struct tp_params tp; struct vpd_params vpd; @@ -259,7 +281,7 @@ struct adapter_params { unsigned char nports; /* # of ethernet ports */ unsigned char portvec; - unsigned char rev; /* chip revision */ + enum chip_type chip; /* chip code */ unsigned char offload; unsigned char bypass; @@ -267,6 +289,23 @@ struct adapter_params { unsigned int ofldq_wr_cred; }; +#include "t4fw_api.h" + +#define FW_VERSION(chip) ( \ + FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD)) +#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) + +struct fw_info { + u8 chip; + char *fs_name; + char *fw_mod_name; + struct fw_hdr fw_hdr; +}; + + struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; @@ -512,25 +551,6 @@ struct sge { struct l2t_data; -#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) -#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) -#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) - -#define CHELSIO_T4 0x4 -#define CHELSIO_T5 0x5 - -enum chip_type { - T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), - T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), - T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), - T4_FIRST_REV = T4_A1, - T4_LAST_REV = T4_A3, - - T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), - T5_FIRST_REV = T5_A1, - T5_LAST_REV = T5_A1, -}; - #ifdef CONFIG_PCI_IOV /* T4 supports SRIOV on PF0-3 and T5 on PF0-7. 
However, the Serial @@ -715,12 +735,12 @@ enum { static inline int is_t5(enum chip_type chip) { - return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; } static inline int is_t4(enum chip_type chip) { - return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) @@ -900,7 +920,11 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); -int t4_check_fw_version(struct adapter *adapter); +int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_tp_version(struct adapter *adapter, u32 *vers); +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, int *reset); int t4_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8b929eeecd2d..d6b12e035a7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { { 0, } }; -#define FW_FNAME "cxgb4/t4fw.bin" +#define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" -#define FW_CFNAME "cxgb4/t4-config.txt" +#define FW4_CFNAME "cxgb4/t4-config.txt" #define FW5_CFNAME "cxgb4/t5-config.txt" MODULE_DESCRIPTION(DRV_DESC); @@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); -MODULE_FIRMWARE(FW_FNAME); +MODULE_FIRMWARE(FW4_FNAME); MODULE_FIRMWARE(FW5_FNAME); /* @@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap); } /* - * Returns 0 if new FW was successfully loaded, a positive errno if a load was - * started but failed, and a negative errno if flash load couldn't start. - */ -static int upgrade_fw(struct adapter *adap) -{ - int ret; - u32 vers, exp_major; - const struct fw_hdr *hdr; - const struct firmware *fw; - struct device *dev = adap->pdev_dev; - char *fw_file_name; - - switch (CHELSIO_CHIP_VERSION(adap->chip)) { - case CHELSIO_T4: - fw_file_name = FW_FNAME; - exp_major = FW_VERSION_MAJOR; - break; - case CHELSIO_T5: - fw_file_name = FW5_FNAME; - exp_major = FW_VERSION_MAJOR_T5; - break; - default: - dev_err(dev, "Unsupported chip type, %x\n", adap->chip); - return -EINVAL; - } - - ret = request_firmware(&fw, fw_file_name, dev); - if (ret < 0) { - dev_err(dev, "unable to load firmware image %s, error %d\n", - fw_file_name, ret); - return ret; - } - - hdr = (const struct fw_hdr *)fw->data; - vers = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) { - ret = -EINVAL; /* wrong major version, won't do */ - goto out; - } - - /* - * If the flash FW is unusable or we found something newer, load it. 
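The cxgb4 hunks above move the chip identity from adap->chip into a packed code in adap->params.chip, and is_t4()/is_t5() become version compares instead of range checks. The following is a small userspace sketch of that packing, built only from the CHELSIO_CHIP_* macros visible in this patch; the sample revision value is illustrative.

/* Minimal sketch of the chip-code packing used by cxgb4 after this patch. */
#include <stdio.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code)           (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code)           ((code) & 0xf)

#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5

int main(void)
{
        int chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 1);    /* T5 rev A1 -> 0x51 */

        /* is_t5() now reduces to a version compare, not a range check */
        printf("version %#x, release %d, is_t5 %d\n",
               CHELSIO_CHIP_VERSION(chip), CHELSIO_CHIP_RELEASE(chip),
               CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
        return 0;
}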
- */ - if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major || - vers > adap->params.fw_vers) { - dev_info(dev, "upgrading firmware ...\n"); - ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, - /*force=*/false); - if (!ret) - dev_info(dev, - "firmware upgraded to version %pI4 from %s\n", - &hdr->fw_ver, fw_file_name); - else - dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); - } else { - /* - * Tell our caller that we didn't upgrade the firmware. - */ - ret = -EINVAL; - } - -out: release_firmware(fw); - return ret; -} - -/* * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. * The allocated memory is cleared. */ @@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset) static int get_regs_len(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) return T4_REGMAP_SIZE; else return T5_REGMAP_SIZE; @@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); data += sizeof(struct queue_port_stats) / sizeof(u64); - if (!is_t4(adapter->chip)) { + if (!is_t4(adapter->params.chip)) { t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); val2 = t4_read_reg(adapter, SGE_STAT_MATCH); @@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, */ static inline unsigned int mk_adap_vers(const struct adapter *ap) { - return CHELSIO_CHIP_VERSION(ap->chip) | - (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); + return CHELSIO_CHIP_VERSION(ap->params.chip) | + (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); } static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, @@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, static const unsigned int *reg_ranges; int arr_size = 0, buf_size = 0; - if (is_t4(ap->chip)) { + if (is_t4(ap->params.chip)) { reg_ranges = &t4_reg_ranges[0]; arr_size = ARRAY_SIZE(t4_reg_ranges); buf_size = T4_REGMAP_SIZE; @@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap) size = t4_read_reg(adap, MA_EDRAM1_BAR); add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); } - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); if (i & EXT_MEM_ENABLE) add_debugfs_mem(adap, "mc", MEM_MC, @@ -3419,7 +3353,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { lp_count = G_LP_COUNT(v1); hp_count = G_HP_COUNT(v1); } else { @@ -3588,7 +3522,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs) do { v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { lp_count = G_LP_COUNT(v1); hp_count = G_HP_COUNT(v1); } else { @@ -3708,7 +3642,7 @@ static void process_db_drop(struct work_struct *work) adap = container_of(work, struct adapter, db_drop_task); - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { disable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); drain_db_fifo(adap, 1); @@ -3753,7 +3687,7 @@ static void process_db_drop(struct work_struct *work) void t4_db_full(struct adapter *adap) { - 
if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, SGE_INT_ENABLE3, DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); @@ -3762,7 +3696,7 @@ void t4_db_full(struct adapter *adap) void t4_db_dropped(struct adapter *adap) { - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) queue_work(workq, &adap->db_drop_task); } @@ -3789,7 +3723,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; - lli.adapter_type = adap->params.rev; + lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> @@ -4483,7 +4417,7 @@ static void setup_memwin(struct adapter *adap) u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mem_win0_base = bar0 + MEMWIN0_BASE; mem_win1_base = bar0 + MEMWIN1_BASE; mem_win2_base = bar0 + MEMWIN2_BASE; @@ -4668,8 +4602,10 @@ static int adap_init0_config(struct adapter *adapter, int reset) const struct firmware *cf; unsigned long mtype = 0, maddr = 0; u32 finiver, finicsum, cfcsum; - int ret, using_flash; + int ret; + int config_issued = 0; char *fw_config_file, fw_config_file_path[256]; + char *config_name = NULL; /* * Reset device if necessary. @@ -4686,9 +4622,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * then use that. Otherwise, use the configuration file stored * in the adapter flash ... */ - switch (CHELSIO_CHIP_VERSION(adapter->chip)) { + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { case CHELSIO_T4: - fw_config_file = FW_CFNAME; + fw_config_file = FW4_CFNAME; break; case CHELSIO_T5: fw_config_file = FW5_CFNAME; @@ -4702,13 +4638,16 @@ static int adap_init0_config(struct adapter *adapter, int reset) ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); if (ret < 0) { - using_flash = 1; + config_name = "On FLASH"; mtype = FW_MEMTYPE_CF_FLASH; maddr = t4_flash_cfg_addr(adapter); } else { u32 params[7], val[7]; - using_flash = 0; + sprintf(fw_config_file_path, + "/lib/firmware/%s", fw_config_file); + config_name = fw_config_file_path; + if (cf->size >= FLASH_CFG_MAX_SIZE) ret = -ENOMEM; else { @@ -4776,6 +4715,26 @@ static int adap_init0_config(struct adapter *adapter, int reset) FW_LEN16(caps_cmd)); ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); + + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. 
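The adap_init0_config() changes above leave the driver with three possible sources for the configuration file: a file under /lib/firmware, the copy in adapter FLASH, and, as a last resort when the FLASH copy yields ENOENT, the default embedded in the firmware image. A compact sketch of that precedence follows; the helper names and variables here are stand-ins, not the driver's own.

/* Sketch of the config-file precedence in adap_init0_config(); stubs are hypothetical. */
#include <stdio.h>

enum cfg_source { CFG_FILESYSTEM, CFG_FLASH, CFG_FW_DEFAULT };

static int fs_config_present;           /* 1 if request_firmware() found the file */
static int flash_caps_rc = -2;          /* ENOENT-style failure from the FLASH copy */

static enum cfg_source pick_config(void)
{
        if (fs_config_present)
                return CFG_FILESYSTEM;  /* reported as "/lib/firmware/..." */
        if (flash_caps_rc == 0)
                return CFG_FLASH;       /* reported as "On FLASH" */
        return CFG_FW_DEFAULT;          /* reported as "Firmware Default" */
}

int main(void)
{
        static const char *names[] = { "filesystem", "FLASH", "firmware default" };

        printf("using %s configuration\n", names[pick_config()]);
        return 0;
}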
+ */ + if (ret == -ENOENT) { + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + config_name = "Firmware Default"; + } + + config_issued = 1; if (ret < 0) goto bye; @@ -4816,7 +4775,6 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; - sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file); /* * Return successfully and note that we're operating with parameters * not supplied by the driver, rather than from hard-wired @@ -4824,11 +4782,8 @@ static int adap_init0_config(struct adapter *adapter, int reset) */ adapter->flags |= USING_SOFT_PARAMS; dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ - "Configuration File %s, version %#x, computed checksum %#x\n", - (using_flash - ? "in device FLASH" - : fw_config_file_path), - finiver, cfcsum); + "Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); return 0; /* @@ -4837,9 +4792,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) * want to issue a warning since this is fairly common.) */ bye: - if (ret != -ENOENT) - dev_warn(adapter->pdev_dev, "Configuration file error %d\n", - -ret); + if (config_issued && ret != -ENOENT) + dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", + config_name, -ret); return ret; } @@ -5086,6 +5041,47 @@ bye: return ret; } +static struct fw_info fw_info_array[] = { + { + .chip = CHELSIO_T4, + .fs_name = FW4_CFNAME, + .fw_mod_name = FW4_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T4, + .fw_ver = __cpu_to_be32(FW_VERSION(T4)), + .intfver_nic = FW_INTFVER(T4, NIC), + .intfver_vnic = FW_INTFVER(T4, VNIC), + .intfver_ri = FW_INTFVER(T4, RI), + .intfver_iscsi = FW_INTFVER(T4, ISCSI), + .intfver_fcoe = FW_INTFVER(T4, FCOE), + }, + }, { + .chip = CHELSIO_T5, + .fs_name = FW5_CFNAME, + .fw_mod_name = FW5_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T5, + .fw_ver = __cpu_to_be32(FW_VERSION(T5)), + .intfver_nic = FW_INTFVER(T5, NIC), + .intfver_vnic = FW_INTFVER(T5, VNIC), + .intfver_ri = FW_INTFVER(T5, RI), + .intfver_iscsi = FW_INTFVER(T5, ISCSI), + .intfver_fcoe = FW_INTFVER(T5, FCOE), + }, + } +}; + +static struct fw_info *find_fw_info(int chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { + if (fw_info_array[i].chip == chip) + return &fw_info_array[i]; + } + return NULL; +} + /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. */ @@ -5123,44 +5119,54 @@ static int adap_init0(struct adapter *adap) * later reporting and B. to warn if the currently loaded firmware * is excessively mismatched relative to the driver.) */ - ret = t4_check_fw_version(adap); - - /* The error code -EFAULT is returned by t4_check_fw_version() if - * firmware on adapter < supported firmware. If firmware on adapter - * is too old (not supported by driver) and we're the MASTER_PF set - * adapter state to DEV_STATE_UNINIT to force firmware upgrade - * and reinitialization. 
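The new adap_init0() path above drives the firmware check through a small per-chip descriptor table rather than hard-coded version constants. Below is a hedged sketch of that lookup-then-load flow; the firmware request and mailbox steps are reduced to comments, and the struct here carries only the fields needed for the illustration.

/* Illustrative lookup, mirroring find_fw_info() in cxgb4_main.c. */
#include <stdio.h>
#include <stddef.h>

struct fw_info_sketch { int chip; const char *fw_mod_name; };

static struct fw_info_sketch fw_table[] = {
        { 0x4, "cxgb4/t4fw.bin" },
        { 0x5, "cxgb4/t5fw.bin" },
};

static struct fw_info_sketch *find_fw(int chip)
{
        for (size_t i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++)
                if (fw_table[i].chip == chip)
                        return &fw_table[i];
        return NULL;                    /* unknown chip: adap_init0() bails out */
}

int main(void)
{
        struct fw_info_sketch *fi = find_fw(0x5);

        if (!fi) {
                fprintf(stderr, "unable to get firmware info\n");
                return 1;
        }
        /* in the driver: request_firmware(fi->fw_mod_name), then t4_prep_fw() */
        printf("would request %s\n", fi->fw_mod_name);
        return 0;
}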
- */ - if ((adap->flags & MASTER_PF) && ret == -EFAULT) - state = DEV_STATE_UNINIT; + t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_tp_version(adap, &adap->params.tp_vers); if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { - if (ret == -EINVAL || ret == -EFAULT || ret > 0) { - if (upgrade_fw(adap) >= 0) { - /* - * Note that the chip was reset as part of the - * firmware upgrade so we don't reset it again - * below and grab the new firmware version. - */ - reset = 0; - ret = t4_check_fw_version(adap); - } else - if (ret == -EFAULT) { - /* - * Firmware is old but still might - * work if we force reinitialization - * of the adapter. Ignoring FW upgrade - * failure. - */ - dev_warn(adap->pdev_dev, - "Ignoring firmware upgrade " - "failure, and forcing driver " - "to reinitialize the " - "adapter.\n"); - ret = 0; - } + struct fw_info *fw_info; + struct fw_hdr *card_fw; + const struct firmware *fw; + const u8 *fw_data = NULL; + unsigned int fw_size = 0; + + /* This is the firmware whose headers the driver was compiled + * against + */ + fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); + if (fw_info == NULL) { + dev_err(adap->pdev_dev, + "unable to get firmware info for chip %d.\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; } + + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = t4_alloc_mem(sizeof(*card_fw)); + + /* Get FW from from /lib/firmware/ */ + ret = request_firmware(&fw, fw_info->fw_mod_name, + adap->pdev_dev); + if (ret < 0) { + dev_err(adap->pdev_dev, + "unable to load firmware image %s, error %d\n", + fw_info->fw_mod_name, ret); + } else { + fw_data = fw->data; + fw_size = fw->size; + } + + /* upgrade FW logic */ + ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, + state, &reset); + + /* Cleaning up */ + if (fw != NULL) + release_firmware(fw); + t4_free_mem(card_fw); + if (ret < 0) - return ret; + goto bye; } /* @@ -5245,7 +5251,7 @@ static int adap_init0(struct adapter *adap) if (ret == -ENOENT) { dev_info(adap->pdev_dev, "No Configuration File present " - "on adapter. Using hard-wired " + "on adapter. Using hard-wired " "configuration parameters.\n"); ret = adap_init0_no_config(adap, reset); } @@ -5787,7 +5793,7 @@ static void print_port_info(const struct net_device *dev) netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", adap->params.vpd.id, - CHELSIO_CHIP_RELEASE(adap->params.rev), buf, + CHELSIO_CHIP_RELEASE(adap->params.chip), buf, is_offload(adap) ? "R" : "", adap->params.pci.width, spd, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? 
" MSI" : ""); @@ -5910,7 +5916,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_unmap_bar0; - if (!is_t4(adapter->chip)) { + if (!is_t4(adapter->params.chip)) { s_qpp = QUEUESPERPAGEPF1 * adapter->fn; qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); @@ -6064,7 +6070,7 @@ sriov: out_free_dev: free_some_resources(adapter); out_unmap_bar: - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); out_unmap_bar0: iounmap(adapter->regs); @@ -6116,7 +6122,7 @@ static void remove_one(struct pci_dev *pdev) free_some_resources(adapter); iounmap(adapter->regs); - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); kfree(adapter); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ac311f5f3eb9..cc380c36e1a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) u32 val; if (q->pend_cred >= 8) { val = PIDX(q->pend_cred / 8); - if (!is_t4(adap->chip)) + if (!is_t4(adap->params.chip)) val |= DBTYPE(1); wmb(); t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | @@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) wmb(); /* write descriptors before telling HW */ spin_lock(&q->db_lock); if (!q->db_disabled) { - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), QID(q->cntxt_id) | PIDX(n)); } else { @@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap, return 0; } - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) __skb_pull(skb, sizeof(struct cpl_trace_pkt)); else __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); @@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_rx_pkt *pkt; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); struct sge *s = &q->adap->sge; - int cpl_trace_pkt = is_t4(q->adap->chip) ? + int cpl_trace_pkt = is_t4(q->adap->params.chip) ? CPL_TRACE_PKT : CPL_TRACE_PKT_T5; if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) @@ -2182,7 +2182,7 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->chip)) { + if (!is_t4(adap->params.chip)) { unsigned int s_qpp; unsigned short udb_density; unsigned long qpshift; @@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap) * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows * and generate an interrupt when this occurs so we can recover. 
*/ - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, V_HP_INT_THRESH(M_HP_INT_THRESH) | V_LP_INT_THRESH(M_LP_INT_THRESH), diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4cbb2f9850be..74a6fce5a15a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; u32 mc_bist_status_rdata, mc_bist_data_pattern; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mc_bist_cmd = MC_BIST_CMD; mc_bist_cmd_addr = MC_BIST_CMD_ADDR; mc_bist_cmd_len = MC_BIST_CMD_LEN; @@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); @@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) { int i; - u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); + u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); /* * Setup offset into PCIE memory window. Address must be a @@ -863,104 +863,169 @@ unlock: } /** - * get_fw_version - read the firmware version + * t4_get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ -static int get_fw_version(struct adapter *adapter, u32 *vers) +int t4_get_fw_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, fw_ver), 1, vers, 0); + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); } /** - * get_tp_version - read the TP microcode version + * t4_get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ -static int get_tp_version(struct adapter *adapter, u32 *vers) +int t4_get_tp_version(struct adapter *adapter, u32 *vers) { - return t4_read_flash(adapter, adapter->params.sf_fw_start + + return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } -/** - * t4_check_fw_version - check if the FW is compatible with this driver - * @adapter: the adapter - * - * Checks if an adapter's FW is compatible with the driver. Returns 0 - * if there's exact match, a negative error if the version could not be - * read or there's a major version mismatch, and a positive value if the - * expected major version is found but there's a minor version mismatch. +/* Is the given firmware API compatible with the one the driver was compiled + * with? 
*/ -int t4_check_fw_version(struct adapter *adapter) +static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) { - u32 api_vers[2]; - int ret, major, minor, micro; - int exp_major, exp_minor, exp_micro; - ret = get_fw_version(adapter, &adapter->params.fw_vers); - if (!ret) - ret = get_tp_version(adapter, &adapter->params.tp_vers); - if (!ret) - ret = t4_read_flash(adapter, adapter->params.sf_fw_start + - offsetof(struct fw_hdr, intfver_nic), - 2, api_vers, 1); - if (ret) - return ret; + /* short circuit if it's the exact same firmware version */ + if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) + return 1; - major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); - minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); - micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); +#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) + if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && + SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) + return 1; +#undef SAME_INTF - switch (CHELSIO_CHIP_VERSION(adapter->chip)) { - case CHELSIO_T4: - exp_major = FW_VERSION_MAJOR; - exp_minor = FW_VERSION_MINOR; - exp_micro = FW_VERSION_MICRO; - break; - case CHELSIO_T5: - exp_major = FW_VERSION_MAJOR_T5; - exp_minor = FW_VERSION_MINOR_T5; - exp_micro = FW_VERSION_MICRO_T5; - break; - default: - dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n", - adapter->chip); - return -EINVAL; - } + return 0; +} - memcpy(adapter->params.api_vers, api_vers, - sizeof(adapter->params.api_vers)); +/* The firmware in the filesystem is usable, but should it be installed? + * This routine explains itself in detail if it indicates the filesystem + * firmware should be installed. + */ +static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, + int k, int c) +{ + const char *reason; - if (major < exp_major || (major == exp_major && minor < exp_minor) || - (major == exp_major && minor == exp_minor && micro < exp_micro)) { - dev_err(adapter->pdev_dev, - "Card has firmware version %u.%u.%u, minimum " - "supported firmware is %u.%u.%u.\n", major, minor, - micro, exp_major, exp_minor, exp_micro); - return -EFAULT; + if (!card_fw_usable) { + reason = "incompatible or unusable"; + goto install; } - if (major != exp_major) { /* major mismatch - fail */ - dev_err(adapter->pdev_dev, - "card FW has major version %u, driver wants %u\n", - major, exp_major); - return -EINVAL; + if (k > c) { + reason = "older than the version supported with this driver"; + goto install; } - if (minor == exp_minor && micro == exp_micro) - return 0; /* perfect match */ + return 0; + +install: + dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " + "installing firmware %u.%u.%u.%u on card.\n", + FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), + FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, + FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), + FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); - /* Minor/micro version mismatch. Report it but often it's OK. 
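All of the version plumbing above works on a single 32-bit fw_ver word, decoded with the FW_HDR_FW_VER_*_GET accessors from t4fw_api.h (byte fields at bits 24, 16, 8 and 0). As a worked example, the T4 defaults 0x01/0x06/0x18/0x00 decode to 1.6.24.0; the short program below only restates those accessors.

/* Decoding a packed firmware version word with the FW_HDR_FW_VER_*_GET macros. */
#include <stdio.h>
#include <stdint.h>

#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
#define FW_HDR_FW_VER_BUILD_GET(x) ((x) & 0xff)

int main(void)
{
        uint32_t v = 0x01061800;        /* T4FW 1.6.24.0 packed into one word */

        printf("%u.%u.%u.%u\n",
               FW_HDR_FW_VER_MAJOR_GET(v), FW_HDR_FW_VER_MINOR_GET(v),
               FW_HDR_FW_VER_MICRO_GET(v), FW_HDR_FW_VER_BUILD_GET(v));
        return 0;
}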
*/ return 1; } +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, + int *reset) +{ + int ret, card_fw_usable, fs_fw_usable; + const struct fw_hdr *fs_fw; + const struct fw_hdr *drv_fw; + + drv_fw = &fw_info->fw_hdr; + + /* Read the header of the firmware on the card */ + ret = -t4_read_flash(adap, FLASH_FW_START, + sizeof(*card_fw) / sizeof(uint32_t), + (uint32_t *)card_fw, 1); + if (ret == 0) { + card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); + } else { + dev_err(adap->pdev_dev, + "Unable to read card's firmware header: %d\n", ret); + card_fw_usable = 0; + } + + if (fw_data != NULL) { + fs_fw = (const void *)fw_data; + fs_fw_usable = fw_compatible(drv_fw, fs_fw); + } else { + fs_fw = NULL; + fs_fw_usable = 0; + } + + if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && + (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { + /* Common case: the firmware on the card is an exact match and + * the filesystem one is an exact match too, or the filesystem + * one is absent/incompatible. + */ + } else if (fs_fw_usable && state == DEV_STATE_UNINIT && + should_install_fs_fw(adap, card_fw_usable, + be32_to_cpu(fs_fw->fw_ver), + be32_to_cpu(card_fw->fw_ver))) { + ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); + if (ret != 0) { + dev_err(adap->pdev_dev, + "failed to install firmware: %d\n", ret); + goto bye; + } + + /* Installed successfully, update the cached header too. */ + memcpy(card_fw, fs_fw, sizeof(*card_fw)); + card_fw_usable = 1; + *reset = 0; /* already reset as part of load_fw */ + } + + if (!card_fw_usable) { + uint32_t d, c, k; + + d = be32_to_cpu(drv_fw->fw_ver); + c = be32_to_cpu(card_fw->fw_ver); + k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; + + dev_err(adap->pdev_dev, "Cannot find a usable firmware: " + "chip state %d, " + "driver compiled with %d.%d.%d.%d, " + "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", + state, + FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), + FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), + FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), + FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), + FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), + FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); + ret = EINVAL; + goto bye; + } + + /* We're using whatever's on the card and it's known to be good. */ + adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); + adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); + +bye: + return ret; +} + /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter @@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter) PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, - is_t4(adapter->chip) ? + is_t4(adapter->params.chip) ? pcie_intr_info : t5_pcie_intr_info); if (fat) @@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port) { u32 v, int_cause_reg; - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); else int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); @@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) #define GET_STAT(name) \ t4_read_reg64(adap, \ - (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ + (is_t4(adap->params.chip) ? 
PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) @@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, { u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; - if (is_t4(adap->chip)) { + if (is_t4(adap->params.chip)) { mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); @@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, int i; u32 port_cfg_reg; - if (is_t4(adap->chip)) + if (is_t4(adap->params.chip)) port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); else port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); @@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, return -EINVAL; #define EPIO_REG(name) \ - (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ + (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ T5_PORT_REG(port, MAC_PORT_EPIO_##name)) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); @@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) { int i, off; - u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); + u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); /* Align on a 2KB boundary. */ @@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, int i, ret; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p; - unsigned int max_naddr = is_t4(adap->chip) ? + unsigned int max_naddr = is_t4(adap->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; - unsigned int max_mac_addr = is_t4(adap->chip) ? + unsigned int max_mac_addr = is_t4(adap->params.chip) ? 
NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter) { int ret, ver; uint16_t device_id; + u32 pl_rev; ret = t4_wait_dev_ready(adapter); if (ret < 0) return ret; get_pci_mode(adapter, &adapter->params.pci); - adapter->params.rev = t4_read_reg(adapter, PL_REV); + pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); ret = get_flash_params(adapter); if (ret < 0) { @@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter) */ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); ver = device_id >> 12; + adapter->params.chip = 0; switch (ver) { case CHELSIO_T4: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, - adapter->params.rev); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); break; case CHELSIO_T5: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, - adapter->params.rev); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", @@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter) return -EINVAL; } - /* Reassign the updated revision field */ - adapter->params.rev = adapter->chip; - init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index ef146c0ba481..0a8205d69d2c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1092,6 +1092,11 @@ #define PL_REV 0x1943c +#define S_REV 0 +#define M_REV 0xfU +#define V_REV(x) ((x) << S_REV) +#define G_REV(x) (((x) >> S_REV) & M_REV) + #define LE_DB_CONFIG 0x19c04 #define HASHEN 0x00100000U @@ -1199,4 +1204,13 @@ #define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) #define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) +#define A_PL_VF_REV 0x4 +#define A_PL_VF_WHOAMI 0x0 +#define A_PL_VF_REVISION 0x8 + +#define S_CHIPID 4 +#define M_CHIPID 0xfU +#define V_CHIPID(x) ((x) << S_CHIPID) +#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) + #endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 6f77ac487743..74fea74ce0aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2157,7 +2157,7 @@ struct fw_debug_cmd { struct fw_hdr { u8 ver; - u8 reserved1; + u8 chip; /* terminator chip type */ __be16 len512; /* bin length in units of 512-bytes */ __be32 fw_ver; /* firmware version */ __be32 tp_microcode_ver; @@ -2176,6 +2176,11 @@ struct fw_hdr { __be32 reserved6[23]; }; +enum fw_hdr_chip { + FW_HDR_CHIP_T4, + FW_HDR_CHIP_T5 +}; + #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index be5c7ef6ca93..68eaa9c88c7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -344,7 +344,6 @@ struct adapter { unsigned long registered_device_map; unsigned long open_device_map; unsigned long flags; - enum chip_type chip; struct adapter_params params; /* queue and interrupt resources */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5f90ec5f7519..0899c0983594 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter) /* * Chip version 4, revision 0x3f (cxgb4vf). */ - return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); + return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); } /* @@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev, reg_block_dump(adapter, regbuf, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); + + /* T5 adds new registers in the PL Register map. + */ reg_block_dump(adapter, regbuf, T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, - T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST); + T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) + ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); reg_block_dump(adapter, regbuf, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); @@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter) unsigned int ethqsets; int err; u32 param, val = 0; + unsigned int chipid; /* * Wait for the device to become ready before proceeding ... @@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter) return err; } + adapter->params.chip = 0; switch (adapter->pdev->device >> 12) { case CHELSIO_T4: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); break; case CHELSIO_T5: - adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); + chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8475c4cda9e4..0a89963c48ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); - if (!is_t4(adapter->chip)) + if (!is_t4(adapter->params.chip)) val |= DBTYPE(1); wmb(); t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 53cbfed21d0b..61362450d05b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -39,21 +39,28 @@ #include "../cxgb4/t4fw_api.h" #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) -#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) #define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) +/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. or + * = "a" for T4 FPGA; "b" for T4 FPGA, etc. 
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + */ #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 enum chip_type { - T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), - T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), - T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), T4_FIRST_REV = T4_A1, - T4_LAST_REV = T4_A3, + T4_LAST_REV = T4_A2, - T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), - T5_FIRST_REV = T5_A1, + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, T5_LAST_REV = T5_A1, }; @@ -203,6 +210,7 @@ struct adapter_params { struct vpd_params vpd; /* Vital Product Data */ struct rss_params rss; /* Receive Side Scaling */ struct vf_resources vfres; /* Virtual Function Resource limits */ + enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ }; @@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, static inline int is_t4(enum chip_type chip) { - return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } int t4vf_wait_dev_ready(struct adapter *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9f96dc3bb112..d958c44341b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned nfilters = 0; unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - unsigned int max_naddr = is_t4(adapter->chip) ? + unsigned int max_naddr = is_t4(adapter->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; @@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, struct fw_vi_mac_exact *p = &cmd.u.exact[0]; size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[1]), 16); - unsigned int max_naddr = is_t4(adapter->chip) ? + unsigned int max_naddr = is_t4(adapter->params.chip) ? NUM_MPS_CLS_SRAM_L_INSTANCES : NUM_MPS_T5_CLS_SRAM_L_INSTANCES; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f4825db5d179..5878df619b53 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -503,6 +503,7 @@ struct be_adapter { }; #define be_physfn(adapter) (!adapter->virtfn) +#define be_virtfn(adapter) (adapter->virtfn) #define sriov_enabled(adapter) (adapter->num_vfs > 0) #define sriov_want(adapter) (be_physfn(adapter) && \ (num_vfs || pci_num_vf(adapter->pdev))) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fb0edfe3d24..e0e8bc1ef14c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, } else { req->hdr.version = 2; req->page_size = 1; /* 1 for 4K */ + + /* coalesce-wm field in this cmd is not relevant to Lancer. 
+ * Lancer uses COMMON_MODIFY_CQ to set this field + */ + if (!lancer_chip(adapter)) + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, @@ -1758,7 +1765,7 @@ err: /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool untagged, bool promiscuous) + u32 num, bool promiscuous) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1778,7 +1785,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, req->interface_id = if_id; req->promiscuous = promiscuous; - req->untagged = untagged; + req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; req->num_vlan = num; if (!promiscuous) { memcpy(req->normal_vlan, vtag_array, @@ -1847,7 +1854,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } + if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != + req->if_flags_mask) { + dev_warn(&adapter->pdev->dev, + "Cannot set rx filter flags 0x%x\n", + req->if_flags_mask); + dev_warn(&adapter->pdev->dev, + "Interface is capable of 0x%x flags only\n", + be_if_cap_flags(adapter)); + } + req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); + status = be_mcc_notify_wait(adapter); + err: spin_unlock_bh(&adapter->mcc_lock); return status; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index edf3e8a0ff83..0075686276aa 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1984,7 +1984,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, char *fw_on_flash); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num, bool untagged, bool promiscuous); + u32 num, bool promiscuous); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3e2162121601..dc88782185f2 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -64,6 +64,9 @@ #define SLIPORT_ERROR_NO_RESOURCE1 0x2 #define SLIPORT_ERROR_NO_RESOURCE2 0x9 +#define SLIPORT_ERROR_FW_RESET1 0x2 +#define SLIPORT_ERROR_FW_RESET2 0x0 + /********* Memory BAR register ************/ #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index eaecaadfa8c5..0fde69d5cb6a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1079,7 +1079,7 @@ static int be_vid_config(struct be_adapter *adapter) vids[num++] = cpu_to_le16(i); status = be_cmd_vlan_config(adapter, adapter->if_handle, - vids, num, 1, 0); + vids, num, 0); if (status) { /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter) */ if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 
adapter->hw_error = true; - dev_err(&adapter->pdev->dev, - "Error detected in the card\n"); + /* Do not log error messages if its a FW reset */ + if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && + sliport_err2 == SLIPORT_ERROR_FW_RESET2) { + dev_info(&adapter->pdev->dev, + "Firmware update in progress\n"); + return; + } else { + dev_err(&adapter->pdev->dev, + "Error detected in the card\n"); + } } if (sliport_status & SLIPORT_STATUS_ERR_MASK) { @@ -2658,8 +2666,8 @@ static int be_close(struct net_device *netdev) be_roce_dev_close(adapter); - for_all_evt_queues(adapter, eqo, i) { - if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { + if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { + for_all_evt_queues(adapter, eqo, i) { napi_disable(&eqo->napi); be_disable_busy_poll(eqo); } @@ -2676,6 +2684,11 @@ static int be_close(struct net_device *netdev) be_rx_qs_destroy(adapter); + for (i = 1; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter)) synchronize_irq(be_msix_vec_get(adapter, eqo)); @@ -2927,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter) } } -static int be_clear(struct be_adapter *adapter) +static void be_mac_clear(struct be_adapter *adapter) { int i; + if (adapter->pmac_id) { + for (i = 0; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; + + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; + } +} + +static int be_clear(struct be_adapter *adapter) +{ be_cancel_worker(adapter); if (sriov_enabled(adapter)) be_vf_clear(adapter); /* delete the primary mac along with the uc-mac list */ - for (i = 0; i < (adapter->uc_macs + 1); i++) - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); - adapter->uc_macs = 0; + be_mac_clear(adapter); be_cmd_if_destroy(adapter, adapter->if_handle, 0); be_clear_queues(adapter); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - be_msix_disable(adapter); return 0; } @@ -3248,12 +3268,10 @@ static int be_mac_setup(struct be_adapter *adapter) memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); } - /* On BE3 VFs this cmd may fail due to lack of privilege. - * Ignore the failure as in this case pmac_id is fetched - * in the IFACE_CREATE cmd. 
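The be_cmd_rx_filter() hunk above no longer asks the firmware for flags the interface never advertised: it warns, then masks the request down to be_if_cap_flags(). A small sketch of that clamp-and-warn pattern follows; the flag values are placeholders, not the real BE_IF_FLAGS_* bits.

/* Illustrative capability clamp, mirroring the be_cmd_rx_filter() change. */
#include <stdio.h>
#include <stdint.h>

static uint32_t clamp_rx_flags(uint32_t requested, uint32_t capable)
{
        if ((requested & capable) != requested) {
                printf("Cannot set rx filter flags %#x\n", requested);
                printf("Interface is capable of %#x flags only\n", capable);
        }
        return requested & capable;     /* only supported bits reach the firmware */
}

int main(void)
{
        uint32_t capable = 0x5f;        /* placeholder for be_if_cap_flags() */
        uint32_t wanted  = 0xff;

        printf("programming flags %#x\n", clamp_rx_flags(wanted, capable));
        return 0;
}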
- */ - be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); + /* For BE3-R VFs, the PF programs the initial MAC address */ + if (!(BEx_chip(adapter) && be_virtfn(adapter))) + be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id[0], 0); return 0; } @@ -3809,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter, } if (change_status == LANCER_FW_RESET_NEEDED) { + dev_info(&adapter->pdev->dev, + "Resetting adapter to activate new FW\n"); status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { @@ -4360,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter) goto err; } - dev_err(dev, "Error recovery successful\n"); + dev_err(dev, "Adapter recovery successful\n"); return 0; err: if (status == -EAGAIN) dev_err(dev, "Waiting for resource provisioning\n"); else - dev_err(dev, "Error recovery failed\n"); + dev_err(dev, "Adapter recovery failed\n"); return status; } @@ -4594,6 +4614,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) if (adapter->wol) be_setup_wol(adapter, true); + be_intr_set(adapter, false); cancel_delayed_work_sync(&adapter->func_recovery_work); netif_device_detach(netdev); @@ -4629,6 +4650,7 @@ static int be_resume(struct pci_dev *pdev) if (status) return status; + be_intr_set(adapter, true); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b2793b91cc55..e7c8b749c5a5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev); * detected as not set during a prior frame transmission, then the * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously - * detected as not set during a prior frame transmission, then the - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs - * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in * frames not being transmitted until there is a 0-to-1 transition on * ENET_TDAR[TDAR]. */ @@ -385,8 +381,15 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) * data. */ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = 0; + fep->tx_skbuff[index] = NULL; + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; + } /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ @@ -772,11 +775,10 @@ fec_enet_tx(struct net_device *ndev) else index = bdp - fep->tx_bd_base; - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - skb = fep->tx_skbuff[index]; + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, + DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; /* Check for errors. 
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -861,6 +863,7 @@ fec_enet_rx(struct net_device *ndev, int budget) struct bufdesc_ex *ebdp = NULL; bool vlan_packet_rcvd = false; u16 vlan_tag; + int index = 0; #ifdef CONFIG_M532x flush_cache_all(); @@ -916,10 +919,15 @@ fec_enet_rx(struct net_device *ndev, int budget) ndev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - data = (__u8*)__va(bdp->cbd_bufaddr); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + if (fep->bufdesc_ex) + index = (struct bufdesc_ex *)bdp - + (struct bufdesc_ex *)fep->rx_bd_base; + else + index = bdp - fep->rx_bd_base; + data = fep->rx_skbuff[index]->data; + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -999,8 +1007,8 @@ fec_enet_rx(struct net_device *ndev, int budget) napi_gro_receive(&fep->napi, skb); } - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, - FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1719,6 +1727,12 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + fec_enet_free_buffers(ndev); + if (net_ratelimit()) + netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } bdp->cbd_sc = BD_ENET_RX_EMPTY; if (fep->bufdesc_ex) { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 2d1c6bdd3618..7628e0fd8455 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, dev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | + dev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 58c147271a36..f9313b36c887 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -83,6 +83,11 @@ struct e1000_adapter; #define E1000_MAX_INTR 10 +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
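The fec_enet_start_xmit() change above maps exactly skb->len bytes and, new in this patch, drops the frame cleanly when the mapping fails instead of handing a bad address to the hardware. The sketch below shows the shape of that check with the DMA API replaced by trivial stand-ins; only the control flow is meant to match the driver.

/* Stand-in TX mapping check; fake_map()/fake_mapping_error() are not the DMA API. */
#include <stdio.h>

typedef unsigned long dma_addr_t;

static dma_addr_t fake_map(const void *buf, unsigned int len)
{
        (void)buf;                      /* a real driver hands this to dma_map_single() */
        return len ? 0x1000 : 0;        /* 0 plays the role of a failed mapping */
}

static int fake_mapping_error(dma_addr_t addr)
{
        return addr == 0;
}

static int xmit(const void *data, unsigned int len)
{
        dma_addr_t addr = fake_map(data, len);

        if (fake_mapping_error(addr)) {
                /* driver: clear cbd_bufaddr, free the skb, rate-limited error, NETDEV_TX_OK */
                printf("Tx DMA memory map failed, frame dropped\n");
                return 0;
        }
        printf("mapped %u bytes at %#lx\n", len, addr);
        return 0;
}

int main(void)
{
        char frame[64] = { 0 };

        return xmit(frame, sizeof(frame));
}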
+ */ +#define E1000_CHECK_RESET_COUNT 50 + /* TX/RX descriptor defines */ #define E1000_DEFAULT_TXD 256 #define E1000_MAX_TXD 256 @@ -312,8 +317,6 @@ struct e1000_adapter { struct delayed_work watchdog_task; struct delayed_work fifo_stall_task; struct delayed_work phy_info_task; - - struct mutex mutex; }; enum e1000_state_t { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index e38622825fa7..46e6544ed1b7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) { set_bit(__E1000_DOWN, &adapter->flags); - /* Only kill reset task if adapter is not resetting */ - if (!test_bit(__E1000_RESETTING, &adapter->flags)) - cancel_work_sync(&adapter->reset_task); - cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + cancel_delayed_work_sync(&adapter->phy_info_task); cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); } void e1000_down(struct e1000_adapter *adapter) @@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter) e1000_clean_all_rx_rings(adapter); } -static void e1000_reinit_safe(struct e1000_adapter *adapter) -{ - while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) - msleep(1); - mutex_lock(&adapter->mutex); - e1000_down(adapter); - e1000_up(adapter); - mutex_unlock(&adapter->mutex); - clear_bit(__E1000_RESETTING, &adapter->flags); -} - void e1000_reinit_locked(struct e1000_adapter *adapter) { - /* if rtnl_lock is not held the call path is bogus */ - ASSERT_RTNL(); WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); @@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) e1000_irq_disable(adapter); spin_lock_init(&adapter->stats_lock); - mutex_init(&adapter->mutex); set_bit(__E1000_DOWN, &adapter->flags); @@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); @@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, phy_info_task.work); - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; - mutex_lock(&adapter->mutex); + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); - mutex_unlock(&adapter->mutex); } /** @@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; u32 tctl; - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; - mutex_lock(&adapter->mutex); if (atomic_read(&adapter->tx_fifo_stall)) { if ((er32(TDT) == er32(TDH)) && (er32(TDFT) == er32(TDFH)) && @@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) schedule_delayed_work(&adapter->fifo_stall_task, 1); } } - mutex_unlock(&adapter->mutex); } bool 
e1000_has_link(struct e1000_adapter *adapter) @@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work) struct e1000_tx_ring *txdr = adapter->tx_ring; u32 link, tctl; - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; - - mutex_lock(&adapter->mutex); link = e1000_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) goto link_up; @@ -2516,7 +2502,7 @@ link_up: adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); /* exit immediately since reset is imminent */ - goto unlock; + return; } } @@ -2544,9 +2530,6 @@ link_up: /* Reschedule the task */ if (!test_bit(__E1000_DOWN, &adapter->flags)) schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); - -unlock: - mutex_unlock(&adapter->mutex); } enum latency_range { @@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, reset_task); - if (test_bit(__E1000_DOWN, &adapter->flags)) - return; e_err(drv, "Reset adapter\n"); - e1000_reinit_safe(adapter); + e1000_reinit_locked(adapter); } /** @@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); e1000_down(adapter); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index aedd5736a87d..8d3945ab7334 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3482,10 +3482,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". 
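The e1000 hunks above remove the adapter mutex and instead serialize teardown against the reset path: close/shutdown wait, bounded by E1000_CHECK_RESET_COUNT, for __E1000_RESETTING to clear, and the watchdog is cancelled before the works it can reschedule. Condensed into one routine (the close/shutdown and down_and_stop hunks are separate in the driver, so treat this as a summary, not the exact code):

    static void foo_quiesce(struct e1000_adapter *adapter)
    {
            int count = E1000_CHECK_RESET_COUNT;

            /* Give an in-flight reset up to 50 * 10-20ms to finish */
            while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
                    usleep_range(10000, 20000);

            set_bit(__E1000_DOWN, &adapter->flags);

            /* The watchdog can reschedule the other works, so cancel it first */
            cancel_delayed_work_sync(&adapter->watchdog_task);
            cancel_delayed_work_sync(&adapter->phy_info_task);
            cancel_delayed_work_sync(&adapter->fifo_stall_task);

            /* Only kill the reset task if the adapter is not resetting */
            if (!test_bit(__E1000_RESETTING, &adapter->flags))
                    cancel_work_sync(&adapter->reset_task);
    }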
**/ -static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) { struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config *config = &adapter->hwtstamp_config; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 rxmtrl = 0; @@ -3586,6 +3586,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) return -ERANGE; } + adapter->hwtstamp_config = *config; + /* enable/disable Tx h/w time stamping */ regval = er32(TSYNCTXCTL); regval &= ~E1000_TSYNCTXCTL_ENABLED; @@ -3874,7 +3876,7 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); /* initialize systim and reset the ns time counter */ - e1000e_config_hwtstamp(adapter); + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { @@ -5797,14 +5799,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - adapter->hwtstamp_config = config; - - ret_val = e1000e_config_hwtstamp(adapter); + ret_val = e1000e_config_hwtstamp(adapter, &config); if (ret_val) return ret_val; - config = adapter->hwtstamp_config; - switch (config.rx_filter) { case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index be15938ba213..12b0932204ba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; + if (!vsi->tx_rings) + return stats; + rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index c4c4fe332c7e..ad2b74d95138 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. 
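The e1000e netdev.c rework above passes the candidate hwtstamp_config into the configuration routine and stores it in the adapter only after every field has been accepted; several later hunks on this page (pch_gbe, stmmac, cpsw, ixp4xx) reshape their tx_type handling the same way. A generic validate-then-commit sketch; struct foo_priv and foo_set_hwtstamp() are hypothetical stand-ins, and only two rx_filter values are shown:

    #include <linux/errno.h>
    #include <linux/net_tstamp.h>
    #include <linux/types.h>

    struct foo_priv {                       /* stand-in for a driver's private data */
            bool hwts_tx_en;
            bool hwts_rx_en;
            struct hwtstamp_config hwtstamp_config;
    };

    static int foo_set_hwtstamp(struct foo_priv *priv, struct hwtstamp_config *cfg)
    {
            if (cfg->flags)
                    return -EINVAL;

            if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
                    return -ERANGE;

            switch (cfg->rx_filter) {
            case HWTSTAMP_FILTER_NONE:
            case HWTSTAMP_FILTER_ALL:
                    break;
            default:
                    return -ERANGE;
            }

            /* Commit only after every field has been validated */
            priv->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
            priv->hwts_rx_en = cfg->rx_filter != HWTSTAMP_FILTER_NONE;
            priv->hwtstamp_config = *cfg;
            return 0;
    }

The point of the reshuffle is that a request rejected part-way through no longer leaves the adapter with a partially applied configuration.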
*/ - udelay(usec_interval); + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index b0f3666b1d7f..c3143da497c8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct igb_adapter *adapter = netdev_priv(netdev); - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC | - WAKE_PHY; wol->wolopts = 0; if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) return; + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + /* apply any specific unsupported masks here */ switch (adapter->hw.device_id) { default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0c55079ebee3..cc06854296a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, rx_ring->l2_accel_priv = NULL; } -int ixgbe_fwd_ring_down(struct net_device *vdev, - struct ixgbe_fwd_adapter *accel) +static int ixgbe_fwd_ring_down(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) { struct ixgbe_adapter *adapter = accel->real_adapter; unsigned int rxbase = accel->rx_base_queue; @@ -7986,10 +7986,9 @@ skip_sriov: NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | - NETIF_F_RXCSUM | - NETIF_F_HW_L2FW_DOFFLOAD; + NETIF_F_RXCSUM; - netdev->hw_features = netdev->features; + netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e4c676006be9..39217e5ff7dc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); /** * ixgbe_identify_phy_generic - Get physical layer module @@ -1164,7 +1165,7 @@ err_read_i2c_eeprom: * * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; s32 status = IXGBE_ERR_PHY_ADDR_INVALID; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index aae900a256da..fffcbdd2bf0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); diff --git 
a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 00cd36e08601..61088a6a9424 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2890,7 +2890,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) PHY_INTERFACE_MODE_GMII); if (!mp->phy) err = -ENODEV; - phy_addr_set(mp, mp->phy->addr); + else + phy_addr_set(mp, mp->phy->addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b8e232b4ea2d..d5f0d72e5e33 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, dev_kfree_skb_any(skb); dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - rx_desc->data_size, DMA_FROM_DEVICE); + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); } if (rx_done) @@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - rx_desc->data_size, DMA_FROM_DEVICE); + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 40626690e8a8..c11d063473e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - struct mlx4_en_tx_ring *tx_ring; int i, carrier_ok; memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); @@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) carrier_ok = netif_carrier_ok(dev); netif_carrier_off(dev); -retry_tx: /* Wait until all tx queues are empty. * there should not be any additional incoming traffic * since we turned the carrier off */ msleep(200); - for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) { - tx_ring = priv->tx_ring[i]; - if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb)) - goto retry_tx; - } if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5789ea2c934d..01fc6515384d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2635,6 +2635,8 @@ static int __init mlx4_init(void) return -ENOMEM; ret = pci_register_driver(&mlx4_driver); + if (ret < 0) + destroy_workqueue(mlx4_wq); return ret < 0 ? 
ret : 0; } diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1ef..822616e3c375 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!ctl->adesc) return NETDEV_TX_BUSY; @@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - sg, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!ctl->adesc) goto out; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 2d045be4b5cf..1e8b9514718b 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 { struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - int result; - memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); + int result, count; + + count = nv_get_sset_count(dev, ETH_SS_TEST); + memset(buffer, 0, count * sizeof(u64)); if (!nv_link_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; @@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 return; } - if (!nv_loopback_test(dev)) { + if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; buffer[3] = 1; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5a0f04c2c813..27ffe0ebf0a6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -245,16 +245,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - adapter->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - adapter->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -284,6 +276,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. 
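A few hunks up, mlx4_init() gains an error path: the workqueue created before pci_register_driver() is destroyed again if registration fails, instead of being leaked. The general unwind-on-failure shape, with hypothetical foo_* names:

    static struct workqueue_struct *foo_wq;

    static int __init foo_init(void)
    {
            int ret;

            foo_wq = create_singlethread_workqueue("foo");
            if (!foo_wq)
                    return -ENOMEM;

            ret = pci_register_driver(&foo_pci_driver);
            if (ret < 0)
                    destroy_workqueue(foo_wq);      /* undo everything done before the failure */

            return ret;
    }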
*/ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 0c9c4e895595..03517478e589 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -18,7 +18,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "1.00.00.33" +#define DRV_VERSION "1.00.00.34" #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 0780e039b271..8dee1beb9854 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { }; #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) +#define QLGE_RCV_MAC_ERR_STATS 7 static int ql_update_ring_coalescing(struct ql_adapter *qdev) { @@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev) iter++; } + /* Update receive mac error statistics */ + iter += QLGE_RCV_MAC_ERR_STATS; + /* * Get Per-priority TX pause frame counter statistics. */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index a245dc18d769..449f506d2e8f 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev, netdev_features_t features) { int err; - /* - * Since there is no support for separate rx/tx vlan accel - * enable/disable make sure tx flag is always in same state as rx. 
- */ - if (features & NETIF_F_HW_VLAN_CTAG_RX) - features |= NETIF_F_HW_VLAN_CTAG_TX; - else - features &= ~NETIF_F_HW_VLAN_CTAG_TX; /* Update the behavior of vlan accel in the adapter */ err = qlge_update_hw_vlan_features(ndev, features); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index f2a2128165dd..737c1a881f78 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp) le32_to_cpu(txd->opts1) & 0xffff, PCI_DMA_TODEVICE); - bytes_compl += skb->len; - pkts_compl++; - if (status & LastFrag) { if (status & (TxError | TxFIFOUnder)) { netif_dbg(cp, tx_err, cp->dev, @@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp) netif_dbg(cp, tx_done, cp->dev, "tx done, slot %d\n", tx_tail); } + bytes_compl += skb->len; + pkts_compl++; dev_kfree_skb_irq(skb); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 799387570766..c737f0ea5de7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x14, 0x9065); rtl_writephy(tp, 0x14, 0x1065); + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + rtl_writephy(tp, 0x1f, 0x0000); } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2e27837ce6a2..fd844b53e385 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + - NET_IP_ALIGN + efx->rx_dma_len); + efx->rx_ip_align + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { efx->rx_scatter = efx->type->always_rx_scatter; efx->rx_buffer_order = 0; @@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx) WARN_ON(channel->rx_pkt_n_frags); } + efx_ptp_start_datapath(efx); + if (netif_device_present(efx->net_dev)) netif_tx_wake_all_queues(efx->net_dev); } @@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx) EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); + efx_ptp_stop_datapath(efx); + /* Stop RX refill */ efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) @@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx, efx->net_dev = net_dev; efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? 
(efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; efx->rx_packet_hash_offset = efx->type->rx_hash_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 366c8e3e3784..4b0bd8a1514d 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -50,6 +50,7 @@ struct efx_mcdi_async_param { static void efx_mcdi_timeout_async(unsigned long context); static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); +static bool efx_mcdi_poll_once(struct efx_nic *efx); static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { @@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) } } +static bool efx_mcdi_poll_once(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + rmb(); + if (!efx->type->mcdi_poll_response(efx)) + return false; + + spin_lock_bh(&mcdi->iface_lock); + efx_mcdi_read_response_header(efx); + spin_unlock_bh(&mcdi->iface_lock); + + return true; +} + static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); @@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = jiffies; - rmb(); - if (efx->type->mcdi_poll_response(efx)) + if (efx_mcdi_poll_once(efx)) break; if (time_after(time, finish)) return -ETIMEDOUT; } - spin_lock_bh(&mcdi->iface_lock); - efx_mcdi_read_response_header(efx); - spin_unlock_bh(&mcdi->iface_lock); - /* Return rc=0 like wait_event_timeout() */ return 0; } @@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, rc = efx_mcdi_await_completion(efx); if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + /* Close the race with efx_mcdi_ev_cpl() executing just too late * and completing a request we've just cancelled, by ensuring * that the seqno check therein fails. 
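The sfc mcdi.c hunks above pull the poll-and-read step out into efx_mcdi_poll_once() so that a request which times out in event mode gets one final poll; if the response turned up without a completion event, it is consumed instead of being reported as a failure. In outline, with wait_for_response() and poll_response_once() as placeholder names for the driver's await/poll helpers:

    rc = wait_for_response(efx);
    if (rc == -ETIMEDOUT) {
            netif_err(efx, hw, efx->net_dev, "MC command timed out\n");
            /* The response may have landed even though no completion event fired */
            if (mcdi->mode == MCDI_MODE_EVENTS && poll_response_once(efx)) {
                    netif_err(efx, hw, efx->net_dev,
                              "MCDI request was completed without an event\n");
                    rc = 0;         /* treat the late response as a normal completion */
            }
    }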
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, ++mcdi->seqno; ++mcdi->credits; spin_unlock_bh(&mcdi->iface_lock); + } - netif_err(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d mode %d timed out\n", - cmd, (int)inlen, mcdi->mode); - } else { + if (rc == 0) { size_t hdr_len, data_len; /* At the very least we need a memory barrier here to ensure diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 656a3277c2b2..15816cacb548 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -75,6 +75,8 @@ struct efx_mcdi_mon { unsigned long last_update; struct device *device; struct efx_mcdi_mon_attribute *attrs; + struct attribute_group group; + const struct attribute_group *groups[2]; unsigned int n_attrs; }; diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 4cc5d95b2a5a..d72ad4fc3617 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx) return rc; } -static ssize_t efx_mcdi_mon_show_name(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%s\n", KBUILD_MODNAME); -} - static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, efx_dword_t *entry) { - struct efx_nic *efx = dev_get_drvdata(dev); + struct efx_nic *efx = dev_get_drvdata(dev->parent); struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); int rc; @@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev, efx_mcdi_sensor_type[mon_attr->type].label); } -static int +static void efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, ssize_t (*reader)(struct device *, struct device_attribute *, char *), @@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; - int rc; strlcpy(attr->name, name, sizeof(attr->name)); attr->index = index; @@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, attr->dev_attr.attr.name = attr->name; attr->dev_attr.attr.mode = S_IRUGO; attr->dev_attr.show = reader; - rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); - if (rc == 0) - ++hwmon->n_attrs; - return rc; + hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; } int efx_mcdi_mon_probe(struct efx_nic *efx) @@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) efx_mcdi_mon_update(efx); /* Allocate space for the maximum possible number of - * attributes for this set of sensors: name of the driver plus + * attributes for this set of sensors: * value, min, max, crit, alarm and label for each sensor. 
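The mcdi_mon.c rework above and below stops calling device_create_file() per attribute on the PCI device and instead collects every sensor attribute into a single attribute_group handed to hwmon_device_register_with_groups(). The hwmon core then creates the name attribute itself, which is why efx_mcdi_mon_show_name disappears and why the show callbacks now fetch drvdata from dev->parent. A minimal static example of the same registration style; the foo sensor and its fixed reading are invented for illustration:

    #include <linux/device.h>
    #include <linux/hwmon.h>
    #include <linux/kernel.h>

    static ssize_t temp1_input_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 42000);     /* placeholder: 42 degrees C */
    }
    static DEVICE_ATTR_RO(temp1_input);

    static struct attribute *foo_attrs[] = {
            &dev_attr_temp1_input.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(foo);

    /* at probe time, with parent being the underlying PCI or platform device:
     *     hwmon_dev = hwmon_device_register_with_groups(parent, "foo",
     *                                                   NULL, foo_groups);
     */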
*/ - n_attrs = 1 + 6 * n_sensors; + n_attrs = 6 * n_sensors; hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); if (!hwmon->attrs) { rc = -ENOMEM; goto fail; } - - hwmon->device = hwmon_device_register(&efx->pci_dev->dev); - if (IS_ERR(hwmon->device)) { - rc = PTR_ERR(hwmon->device); + hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!hwmon->group.attrs) { + rc = -ENOMEM; goto fail; } - rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0); - if (rc) - goto fail; - for (i = 0, j = -1, type = -1; ; i++) { enum efx_hwmon_type hwmon_type; const char *hwmon_prefix; @@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) page = type / 32; j = -1; if (page == n_pages) - return 0; + goto hwmon_register; MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); @@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) if (min1 != max1) { snprintf(name, sizeof(name), "%s%u_input", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_value, i, type, 0); - if (rc) - goto fail; if (hwmon_type != EFX_HWMON_POWER) { snprintf(name, sizeof(name), "%s%u_min", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_limit, i, type, min1); - if (rc) - goto fail; } snprintf(name, sizeof(name), "%s%u_max", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_limit, i, type, max1); - if (rc) - goto fail; if (min2 != max2) { /* Assume max2 is critical value. @@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) */ snprintf(name, sizeof(name), "%s%u_crit", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_limit, i, type, max2); - if (rc) - goto fail; } } snprintf(name, sizeof(name), "%s%u_alarm", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_alarm, i, type, 0); - if (rc) - goto fail; if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && efx_mcdi_sensor_type[type].label) { snprintf(name, sizeof(name), "%s%u_label", hwmon_prefix, hwmon_index); - rc = efx_mcdi_mon_add_attr( + efx_mcdi_mon_add_attr( efx, name, efx_mcdi_mon_show_label, i, type, 0); - if (rc) - goto fail; } } +hwmon_register: + hwmon->groups[0] = &hwmon->group; + hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, + KBUILD_MODNAME, NULL, + hwmon->groups); + if (IS_ERR(hwmon->device)) { + rc = PTR_ERR(hwmon->device); + goto fail; + } + + return 0; + fail: efx_mcdi_mon_remove(efx); return rc; @@ -516,14 +501,11 @@ fail: void efx_mcdi_mon_remove(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); - unsigned int i; - for (i = 0; i < hwmon->n_attrs; i++) - device_remove_file(&efx->pci_dev->dev, - &hwmon->attrs[i].dev_attr); - kfree(hwmon->attrs); if (hwmon->device) hwmon_device_unregister(hwmon->device); + kfree(hwmon->attrs); + kfree(hwmon->group.attrs); efx_nic_free_buffer(efx, &hwmon->dma_buf); } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index b14a717ac3e8..542a0d252ae0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -683,6 +683,8 @@ struct vfdi_status; * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX + * @rx_ip_align: 
RX DMA address offset to have IP header aligned in + * in accordance with NET_IP_ALIGN * @rx_dma_len: Current maximum RX DMA length * @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_truesize: Amortised allocation size of an RX buffer, @@ -816,6 +818,7 @@ struct efx_nic { unsigned rss_spread; unsigned tx_channel_offset; unsigned n_tx_channels; + unsigned int rx_ip_align; unsigned int rx_dma_len; unsigned int rx_buffer_order; unsigned int rx_buffer_truesize; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 11b6112d9249..91c63ec79c5f 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_ptp_start_datapath(struct efx_nic *efx); +void efx_ptp_stop_datapath(struct efx_nic *efx); extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 03acf57df045..3dd39dcfe36b 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -220,6 +220,7 @@ struct efx_ptp_timeset { * @evt_list: List of MC receive events awaiting packets * @evt_free_list: List of free events * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @evt_overflow: Boolean indicating that event list has overflowed * @rx_evts: Instantiated events (on evt_list and evt_free_list) * @workwq: Work queue for processing pending PTP operations * @work: Work task @@ -270,6 +271,7 @@ struct efx_ptp_data { struct list_head evt_list; struct list_head evt_free_list; spinlock_t evt_lock; + bool evt_overflow; struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; struct workqueue_struct *workwq; struct work_struct work; @@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) } } } + /* If the event overflow flag is set and the event list is now empty + * clear the flag to re-enable the overflow warning message. + */ + if (ptp->evt_overflow && list_empty(&ptp->evt_list)) + ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); } @@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, break; } } + /* If the event overflow flag is set and the event list is now empty + * clear the flag to re-enable the overflow warning message. 
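The ptp.c changes in this area add an evt_overflow flag so that an overflowing receive-event list is reported once rather than for every dropped event, and the warning is re-armed only when the list drains. The two halves of the pattern, condensed from the hunks above and below:

    if (!list_empty(&ptp->evt_free_list)) {
            /* ... take a free slot and queue the event as before ... */
    } else if (!ptp->evt_overflow) {
            netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
            ptp->evt_overflow = true;       /* log once */
    }

    /* wherever the event list is matched or expired: */
    if (ptp->evt_overflow && list_empty(&ptp->evt_list))
            ptp->evt_overflow = false;      /* list is empty again, re-arm the warning */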
+ */ + if (ptp->evt_overflow && list_empty(&ptp->evt_list)) + ptp->evt_overflow = false; spin_unlock_bh(&ptp->evt_lock); return rc; @@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) __skb_queue_tail(q, skb); } else if (time_after(jiffies, match->expiry)) { match->state = PTP_PACKET_STATE_TIMED_OUT; - netif_warn(efx, rx_err, efx->net_dev, - "PTP packet - no timestamp seen\n"); + if (net_ratelimit()) + netif_warn(efx, rx_err, efx->net_dev, + "PTP packet - no timestamp seen\n"); __skb_queue_tail(q, skb); } else { /* Replace unprocessed entry and stop */ @@ -788,9 +801,14 @@ fail: static int efx_ptp_stop(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; - int rc = efx_ptp_disable(efx); struct list_head *cursor; struct list_head *next; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); if (ptp->rxfilter_installed) { efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, @@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx) list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { list_move(cursor, &efx->ptp_data->evt_free_list); } + ptp->evt_overflow = false; spin_unlock_bh(&efx->ptp_data->evt_lock); return rc; } +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) + return efx_ptp_start(efx); + return 0; +} + static void efx_ptp_pps_worker(struct work_struct *work) { struct efx_ptp_data *ptp = @@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) spin_lock_init(&ptp->evt_lock); for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + ptp->evt_overflow = false; ptp->phc_clock_info.owner = THIS_MODULE; snprintf(ptp->phc_clock_info.name, @@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) skb->len >= PTP_MIN_LENGTH && skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); } @@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, { if ((enable_wanted != efx->ptp_data->enabled) || (enable_wanted && (efx->ptp_data->mode != new_mode))) { - int rc; + int rc = 0; if (enable_wanted) { /* Change of mode requires disable */ @@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, * succeed. */ efx->ptp_data->mode = new_mode; - rc = efx_ptp_start(efx); + if (netif_running(efx->net_dev)) + rc = efx_ptp_start(efx); if (rc == 0) { rc = efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS * 2); @@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) list_add_tail(&evt->link, &ptp->evt_list); queue_work(ptp->workwq, &ptp->work); - } else { - netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); + } else if (!ptp->evt_overflow) { + /* Log a warning message and set the event overflow flag. + * The message won't be logged again until the event queue + * becomes empty. 
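The efx_ptp_is_ptp_tx() hunk above adds checks that the transport header has been set and that the IP and UDP headers actually fit in the linear area before ip_hdr()/udp_hdr() are dereferenced. Written out as a single predicate (PTP_MIN_LENGTH, MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM and PTP_EVENT_PORT are the driver's own constants; foo_is_ptp_event() is just a name for the sketch):

    #include <linux/if_ether.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/skbuff.h>
    #include <linux/udp.h>

    static bool foo_is_ptp_event(struct sk_buff *skb)
    {
            return skb->len >= PTP_MIN_LENGTH &&
                   skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
                   likely(skb->protocol == htons(ETH_P_IP)) &&
                   skb_transport_header_was_set(skb) &&
                   skb_network_header_len(skb) >= sizeof(struct iphdr) &&
                   ip_hdr(skb)->protocol == IPPROTO_UDP &&
                   skb_headlen(skb) >=
                   skb_transport_offset(skb) + sizeof(struct udphdr) &&
                   udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
    }

Without the header-length checks, a short or malformed locally generated packet could send udp_hdr() reading past the data that is actually present.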
+ */ + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); + ptp->evt_overflow = true; } spin_unlock_bh(&ptp->evt_lock); } @@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) if (rc != 0) return rc; - ptp_data->current_adjfreq = delta; + ptp_data->current_adjfreq = adjustment_ns; return 0; } @@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), @@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx) efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = &efx_ptp_channel_type; } + +void efx_ptp_start_datapath(struct efx_nic *efx) +{ + if (efx_ptp_restart(efx)) + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); +} + +void efx_ptp_stop_datapath(struct efx_nic *efx) +{ + efx_ptp_stop(efx); +} diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8f09e686fc23..42488df1f4ec 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / @@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; + rx_buf->dma_addr = dma_addr + efx->rx_ip_align; rx_buf->page = page; - rx_buf->page_offset = page_offset + NET_IP_ALIGN; + rx_buf->page_offset = page_offset + efx->rx_ip_align; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 0c9b5d94154f..8bf29eb4a5a0 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -82,6 +82,7 @@ static const char version[] = #include <linux/mii.h> #include <linux/workqueue.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * } } +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id smc91x_match[] = { + { .compatible = "smsc,lan91c94", }, + { .compatible = "smsc,lan91c111", }, + {}, +}; +MODULE_DEVICE_TABLE(of, smc91x_match); +#endif + /* * smc_init(void) * Input parameters: @@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * static int smc_drv_probe(struct platform_device *pdev) { struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); + const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; struct resource *res, *ires; @@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev) */ lp = netdev_priv(ndev); + lp->cfg.flags = 0; if (pd) { memcpy(&lp->cfg, pd, 
sizeof(lp->cfg)); lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); - } else { + } + +#if IS_BUILTIN(CONFIG_OF) + match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); + if (match) { + struct device_node *np = pdev->dev.of_node; + u32 val; + + /* Combination of IO widths supported, default to 16-bit */ + if (!of_property_read_u32(np, "reg-io-width", &val)) { + if (val & 1) + lp->cfg.flags |= SMC91X_USE_8BIT; + if ((val == 0) || (val & 2)) + lp->cfg.flags |= SMC91X_USE_16BIT; + if (val & 4) + lp->cfg.flags |= SMC91X_USE_32BIT; + } else { + lp->cfg.flags |= SMC91X_USE_16BIT; + } + } +#endif + + if (!pd && !match) { lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; @@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev) return 0; } -#ifdef CONFIG_OF -static const struct of_device_id smc91x_match[] = { - { .compatible = "smsc,lan91c94", }, - { .compatible = "smsc,lan91c111", }, - {}, -}; -MODULE_DEVICE_TABLE(of, smc91x_match); -#endif - static struct dev_pm_ops smc_drv_pm_ops = { .suspend = smc_drv_suspend, .resume = smc_drv_resume, diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index c9d4c872e81d..749654b976bc 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -46,7 +46,8 @@ defined(CONFIG_MACH_LITTLETON) ||\ defined(CONFIG_MACH_ZYLONITE2) ||\ defined(CONFIG_ARCH_VIPER) ||\ - defined(CONFIG_MACH_STARGATE2) + defined(CONFIG_MACH_STARGATE2) ||\ + defined(CONFIG_ARCH_VERSATILE) #include <asm/mach-types.h> @@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_outl(v, a, r) writel(v, (a) + (r)) #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) #define SMC_IRQ_FLAGS (-1) /* from resource */ /* We actually can't write halfwords properly if not word aligned */ @@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define RPC_LSA_DEFAULT RPC_LED_TX_RX #define RPC_LSB_DEFAULT RPC_LED_100_10 -#elif defined(CONFIG_ARCH_VERSATILE) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - #elif defined(CONFIG_MN10300) /* diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d4ccd35a016..8a7a23a84ac5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -435,16 +435,9 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (config.flags) return -EINVAL; - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - priv->hwts_tx_en = 1; - break; - default: + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } if (priv->adv_ts) { switch 
(config.rx_filter) { @@ -576,6 +569,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) } } priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dd0dd6279b4e..4f1d2549130e 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM - /*| NETIF_F_FRAGLIST */ ; ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 90d41d26ec6d..5120d9ce1dd4 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -967,14 +967,19 @@ static inline void cpsw_add_dual_emac_def_ale_entries( priv->host_port, ALE_VLAN, slave->port_vlan); } -static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +static void soft_reset_slave(struct cpsw_slave *slave) { char name[32]; - u32 slave_port; - - sprintf(name, "slave-%d", slave->slave_num); + snprintf(name, sizeof(name), "slave-%d", slave->slave_num); soft_reset(name, &slave->sliver->soft_reset); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); @@ -1146,6 +1151,12 @@ static int cpsw_ndo_open(struct net_device *ndev) * receive descs */ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); + + if (cpts_register(&priv->pdev->dev, priv->cpts, + priv->data.cpts_clock_mult, + priv->data.cpts_clock_shift)) + dev_err(priv->dev, "error registering cpts device\n"); + } /* Enable Interrupt pacing if configured */ @@ -1192,6 +1203,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_carrier_off(priv->ndev); if (cpsw_common_res_usage_state(priv) <= 1) { + cpts_unregister(priv->cpts); cpsw_intr_disable(priv); cpdma_ctlr_int_ctrl(priv->dma, false); cpdma_ctlr_stop(priv->dma); @@ -1323,6 +1335,10 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2) + return -EOPNOTSUPP; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -1330,16 +1346,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) if (cfg.flags) return -EINVAL; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - cpts->tx_enable = 0; - break; - case HWTSTAMP_TX_ON: - cpts->tx_enable = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -1366,6 +1374,8 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } + cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; + switch (priv->version) { case CPSW_VERSION_1: cpsw_hwtstamp_v1(priv); @@ -1374,7 +1384,7 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) cpsw_hwtstamp_v2(priv); break; default: 
- return -ENOTSUPP; + WARN_ON(1); } return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; @@ -1813,6 +1823,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } i++; + if (i == data->slaves) + break; } return 0; @@ -1980,9 +1992,15 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_runtime_disable_ret; } priv->regs = ss_regs; - priv->version = __raw_readl(&priv->regs->id_ver); priv->host_port = HOST_PORT_NUM; + /* Need to enable clocks with runtime PM api to access module + * registers + */ + pm_runtime_get_sync(&pdev->dev); + priv->version = readl(&priv->regs->id_ver); + pm_runtime_put_sync(&pdev->dev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->wr_regs)) { @@ -2152,8 +2170,6 @@ static int cpsw_remove(struct platform_device *pdev) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); unregister_netdev(ndev); - cpts_unregister(priv->cpts); - cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); cpdma_chan_destroy(priv->rxch); @@ -2173,8 +2189,9 @@ static int cpsw_suspend(struct device *dev) if (netif_running(ndev)) cpsw_ndo_stop(ndev); - soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); - soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); + + for_each_slave(priv, soft_reset_slave); + pm_runtime_put_sync(&pdev->dev); /* Select sleep pin state */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 41ba974bf37c..cd9b164a0434 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -61,6 +61,7 @@ #include <linux/davinci_emac.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_net.h> @@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; +static const struct of_device_id davinci_emac_of_match[]; + static struct emac_platform_data * davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; + const struct of_device_id *match; + const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; const u8 *mac_addr; @@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) priv->phy_node = of_parse_phandle(np, "phy-handle", 0); if (!priv->phy_node) - pdata->phy_id = ""; + pdata->phy_id = NULL; + + auxdata = pdev->dev.platform_data; + if (auxdata) { + pdata->interrupt_enable = auxdata->interrupt_enable; + pdata->interrupt_disable = auxdata->interrupt_disable; + } + + match = of_match_device(davinci_emac_of_match, &pdev->dev); + if (match && match->data) { + auxdata = match->data; + pdata->version = auxdata->version; + pdata->hw_ram_addr = auxdata->hw_ram_addr; + } pdev->dev.platform_data = pdata; @@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { }; #if IS_ENABLED(CONFIG_OF) +static const struct emac_platform_data am3517_emac_data = { + .version = EMAC_VERSION_2, + .hw_ram_addr = 0x01e20000, +}; + static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, + {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index d022bf936572..ad61d26a44f3 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ 
b/drivers/net/ethernet/via/via-velocity.c @@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget) unsigned int rx_done; unsigned long flags; - spin_lock_irqsave(&vptr->lock, flags); /* * Do rx and tx twice for performance (taken from the VIA * out-of-tree driver). */ - rx_done = velocity_rx_srv(vptr, budget / 2); - velocity_tx_srv(vptr); - rx_done += velocity_rx_srv(vptr, budget - rx_done); + rx_done = velocity_rx_srv(vptr, budget); + spin_lock_irqsave(&vptr->lock, flags); velocity_tx_srv(vptr); - /* If budget not fully consumed, exit the polling mode */ if (rx_done < budget) { napi_complete(napi); @@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) if (ret < 0) goto out_free_tmp_vptr_1; + napi_disable(&vptr->napi); + spin_lock_irqsave(&vptr->lock, flags); netif_stop_queue(dev); @@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) velocity_give_many_rx_descs(vptr); + napi_enable(&vptr->napi); + mac_enable_int(vptr->mac_regs); netif_start_queue(dev); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1f2364126323..2166e879a096 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op) platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; + ndev->features = NETIF_F_SG; ndev->netdev_ops = &temac_netdev_ops; ndev->ethtool_ops = &temac_ethtool_ops; #if 0 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b2ff038d6d20..f9293da19e26 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op) SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ - ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; + ndev->features = NETIF_F_SG; ndev->netdev_ops = &axienet_netdev_ops; ndev->ethtool_ops = &axienet_ethtool_ops; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 74234a51c851..fefb8cd5eb65 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, drvdata->base_addr + XEL_TSR_OFFSET); - /* Enable the Tx interrupts for the second Buffer if - * configured in HW */ - if (drvdata->tx_ping_pong != 0) { - reg_data = __raw_readl(drvdata->base_addr + - XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - } - /* Enable the Rx interrupts for the first buffer */ __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); - /* Enable the Rx interrupts for the second Buffer if - * configured in HW */ - if (drvdata->rx_ping_pong != 0) { - __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + - XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); - } - /* Enable the Global Interrupt Enable */ __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } @@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) 
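The via-velocity hunks above shrink the poll-handler locking to just the TX servicing and, in velocity_change_mtu(), stop NAPI polling before the descriptor rings are swapped for the new MTU. The bracketing pattern, reduced to its outline (vptr and dev stand in for the driver's private data and net_device):

    napi_disable(&vptr->napi);      /* no poll callbacks while the rings change */
    netif_stop_queue(dev);

    /* ... tear down the old RX/TX rings and set up rings sized for the new MTU ... */

    napi_enable(&vptr->napi);
    netif_start_queue(dev);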
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), drvdata->base_addr + XEL_TSR_OFFSET); - /* Disable the Tx interrupts for the second Buffer - * if configured in HW */ - if (drvdata->tx_ping_pong != 0) { - reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); - } - /* Disable the Rx interrupts for the first buffer */ reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), drvdata->base_addr + XEL_RSR_OFFSET); - - /* Disable the Rx interrupts for the second buffer - * if configured in HW */ - if (drvdata->rx_ping_pong != 0) { - - reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET); - __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), - drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET); - } } /** @@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, *to_u16_ptr++ = *from_u16_ptr++; *to_u16_ptr++ = *from_u16_ptr++; + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); + /* Output a word */ *to_u32_ptr++ = align_buffer; } @@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, for (; length > 0; length--) *to_u8_ptr++ = *from_u8_ptr++; + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); *to_u32_ptr = align_buffer; } } diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index e78802e75ea6..bcc224a83734 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -389,16 +389,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ch = PORT2CHANNEL(port); regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; - switch (cfg.tx_type) { - case HWTSTAMP_TX_OFF: - port->hwts_tx_en = 0; - break; - case HWTSTAMP_TX_ON: - port->hwts_tx_en = 1; - break; - default: + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; - } switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -416,6 +408,8 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } + port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + /* Clear out any old time stamps. */ __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 9dccb1edfd2a..2a89da080317 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -628,6 +628,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, const struct iovec *iv, unsigned long total_len, size_t count, int noblock) { + int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long len = total_len; @@ -670,6 +671,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { copylen = vnet_hdr.hdr_len ? 
vnet_hdr.hdr_len : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; linear = copylen; if (iov_pages(iv, vnet_hdr_len + copylen, count) <= MAX_SKB_FRAGS) @@ -678,7 +681,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!zerocopy) { copylen = len; - linear = vnet_hdr.hdr_len; + if (vnet_hdr.hdr_len > good_linear) + linear = good_linear; + else + linear = vnet_hdr.hdr_len; } skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, @@ -738,7 +744,7 @@ err: rcu_read_lock(); vlan = rcu_dereference(q->vlan); if (vlan) - vlan->dev->stats.tx_dropped++; + this_cpu_inc(vlan->pcpu_stats->tx_dropped); rcu_read_unlock(); return err; @@ -761,11 +767,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, const struct sk_buff *skb, const struct iovec *iv, int len) { - struct macvlan_dev *vlan; int ret; int vnet_hdr_len = 0; int vlan_offset = 0; - int copied; + int copied, total; if (q->flags & IFF_VNET_HDR) { struct virtio_net_hdr vnet_hdr; @@ -780,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) return -EFAULT; } - copied = vnet_hdr_len; + total = copied = vnet_hdr_len; + total += skb->len; if (!vlan_tx_tag_present(skb)) len = min_t(int, skb->len, len); @@ -795,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); len = min_t(int, skb->len + VLAN_HLEN, len); + total += VLAN_HLEN; copy = min_t(int, vlan_offset, len); ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); @@ -812,19 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, } ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); - copied += len; done: - rcu_read_lock(); - vlan = rcu_dereference(q->vlan); - if (vlan) { - preempt_disable(); - macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); - preempt_enable(); - } - rcu_read_unlock(); - - return ret ? ret : copied; + return ret ? ret : total; } static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, @@ -879,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv, } ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK); - ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? 
*/ + ret = min_t(ssize_t, ret, len); + if (ret > 0) + iocb->ki_pos = ret; out: return ret; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3ae28f420868..26fa05a472b4 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = { .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_KSZ8041RNLI, + .phy_id_mask = 0x00fffff0, + .name = "Micrel KSZ8041RNLI", + .features = PHY_BASIC_FEATURES | + SUPPORTED_Pause | SUPPORTED_Asym_Pause, + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_KSZ8051, .phy_id_mask = 0x00fffff0, .name = "Micrel KSZ8051", diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 74630e94fa3b..d6447b3f7409 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -697,7 +697,7 @@ static int genphy_config_advert(struct phy_device *phydev) * to the values in phydev. Assumes that the values are valid. * Please see phy_sanitize_settings(). */ -static int genphy_setup_forced(struct phy_device *phydev) +int genphy_setup_forced(struct phy_device *phydev) { int err; int ctl = 0; @@ -716,7 +716,7 @@ static int genphy_setup_forced(struct phy_device *phydev) return err; } - +EXPORT_SYMBOL(genphy_setup_forced); /** * genphy_restart_aneg - Enable and Restart Autonegotiation diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 69b482bce7d2..14372c65a7e8 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -3,7 +3,7 @@ * * Author: Kriston Carson * - * Copyright (c) 2005, 2009 Freescale Semiconductor, Inc. + * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -18,6 +18,11 @@ #include <linux/ethtool.h> #include <linux/phy.h> +/* Vitesse Extended Page Magic Register(s) */ +#define MII_VSC82X4_EXT_PAGE_16E 0x10 +#define MII_VSC82X4_EXT_PAGE_17E 0x11 +#define MII_VSC82X4_EXT_PAGE_18E 0x12 + /* Vitesse Extended Control Register 1 */ #define MII_VSC8244_EXT_CON1 0x17 #define MII_VSC8244_EXTCON1_INIT 0x0000 @@ -54,7 +59,14 @@ #define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */ #define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004 +/* Vitesse Extended Page Access Register */ +#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f + +#define PHY_ID_VSC8234 0x000fc620 #define PHY_ID_VSC8244 0x000fc6c0 +#define PHY_ID_VSC8514 0x00070670 +#define PHY_ID_VSC8574 0x000704a0 +#define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -118,7 +130,10 @@ static int vsc82xx_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) err = phy_write(phydev, MII_VSC8244_IMASK, - phydev->drv->phy_id == PHY_ID_VSC8244 ? + (phydev->drv->phy_id == PHY_ID_VSC8234 || + phydev->drv->phy_id == PHY_ID_VSC8244 || + phydev->drv->phy_id == PHY_ID_VSC8514 || + phydev->drv->phy_id == PHY_ID_VSC8574) ? 
MII_VSC8244_IMASK_MASK : MII_VSC8221_IMASK_MASK); else { @@ -149,21 +164,126 @@ static int vsc8221_config_init(struct phy_device *phydev) */ } -/* Vitesse 824x */ +/* vsc82x4_config_autocross_enable - Enable auto MDI/MDI-X for forced links + * @phydev: target phy_device struct + * + * Enable auto MDI/MDI-X when in 10/100 forced link speeds by writing + * special values in the VSC8234/VSC8244 extended reserved registers + */ +static int vsc82x4_config_autocross_enable(struct phy_device *phydev) +{ + int ret; + + if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed > SPEED_100) + return 0; + + /* map extended registers set 0x10 - 0x1e */ + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x52b5); + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_18E, 0x0012); + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_17E, 0x2803); + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_16E, 0x87fa); + /* map standard registers set 0x10 - 0x1e */ + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000); + else + phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000); + + return ret; +} + +/* vsc82x4_config_aneg - restart auto-negotiation or write BMCR + * @phydev: target phy_device struct + * + * Description: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. If it is not + * enabled, then we write the BMCR and also start the auto + * MDI/MDI-X feature + */ +static int vsc82x4_config_aneg(struct phy_device *phydev) +{ + int ret; + + /* Enable auto MDI/MDI-X when in 10/100 forced link speeds by + * writing special values in the VSC8234 extended reserved registers + */ + if (phydev->autoneg != AUTONEG_ENABLE && phydev->speed <= SPEED_100) { + ret = genphy_setup_forced(phydev); + + if (ret < 0) /* error */ + return ret; + + return vsc82x4_config_autocross_enable(phydev); + } + + return genphy_config_aneg(phydev); +} + +/* Vitesse 82xx */ static struct phy_driver vsc82xx_driver[] = { { + .phy_id = PHY_ID_VSC8234, + .name = "Vitesse VSC8234", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_VSC8244, .name = "Vitesse VSC8244", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, - .config_aneg = &genphy_config_aneg, + .config_aneg = &vsc82x4_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_VSC8514, + .name = "Vitesse VSC8514", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = PHY_ID_VSC8574, + .name = "Vitesse VSC8574", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + 
.driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = PHY_ID_VSC8662, + .name = "Vitesse VSC8662", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { /* Vitesse 8221 */ .phy_id = PHY_ID_VSC8221, .phy_id_mask = 0x000ffff0, @@ -207,7 +327,11 @@ module_init(vsc82xx_init); module_exit(vsc82xx_exit); static struct mdio_device_id __maybe_unused vitesse_tbl[] = { + { PHY_ID_VSC8234, 0x000ffff0 }, { PHY_ID_VSC8244, 0x000fffc0 }, + { PHY_ID_VSC8514, 0x000ffff0 }, + { PHY_ID_VSC8574, 0x000ffff0 }, + { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, { } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 5f66e30d9823..82ee6ed954cb 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, if (error < 0) goto end; - m->msg_namelen = 0; - if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6574eb8766f9..736050d6b451 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team, return 0; } +static void __team_carrier_check(struct team *team); + static int team_user_linkup_option_set(struct team *team, struct team_gsetter_ctx *ctx) { @@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team, port->user.linkup = ctx->data.bool_val; team_refresh_port_linkup(port); + __team_carrier_check(port->team); return 0; } @@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team, port->user.linkup_enabled = ctx->data.bool_val; team_refresh_port_linkup(port); + __team_carrier_check(port->team); return 0; } @@ -2650,7 +2654,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, return err; } -static struct genl_ops team_nl_ops[] = { +static const struct genl_ops team_nl_ops[] = { { .cmd = TEAM_CMD_NOOP, .doit = team_nl_cmd_noop, @@ -2676,15 +2680,15 @@ static struct genl_ops team_nl_ops[] = { }, }; -static struct genl_multicast_group team_change_event_mcgrp = { - .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, +static const struct genl_multicast_group team_nl_mcgrps[] = { + { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }, }; static int team_nl_send_multicast(struct sk_buff *skb, struct team *team, u32 portid) { - return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, - team_change_event_mcgrp.id, GFP_KERNEL); + return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev), + skb, 0, 0, GFP_KERNEL); } static int team_nl_send_event_options_get(struct team *team, @@ -2703,23 +2707,8 @@ static int team_nl_send_event_port_get(struct team *team, static int team_nl_init(void) { - int err; - - err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, - ARRAY_SIZE(team_nl_ops)); - if (err) - return err; - - err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); - if (err) - goto err_change_event_grp_reg; - - return 0; - -err_change_event_grp_reg: - genl_unregister_family(&team_nl_family); - - return err; + return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops, + 
team_nl_mcgrps); } static void team_nl_fini(void) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7cb105c103fe..7c8343a4f918 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -981,6 +981,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb; size_t len = total_len, align = NET_SKB_PAD, linear; struct virtio_net_hdr gso = { 0 }; + int good_linear; int offset = 0; int copylen; bool zerocopy = false; @@ -1021,12 +1022,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; } + good_linear = SKB_MAX_HEAD(align); + if (msg_control) { /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; + if (copylen > good_linear) + copylen = good_linear; linear = copylen; if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) zerocopy = true; @@ -1034,7 +1039,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (!zerocopy) { copylen = len; - linear = gso.hdr_len; + if (gso.hdr_len > good_linear) + linear = good_linear; + else + linear = gso.hdr_len; } skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); @@ -1176,7 +1184,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, { struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; - int vlan_offset = 0; + int vlan_offset = 0, copied; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) @@ -1240,6 +1248,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, total += tun->vnet_hdr_sz; } + copied = total; + total += skb->len; if (!vlan_tx_tag_present(skb)) { len = min_t(int, skb->len, len); } else { @@ -1254,24 +1264,24 @@ static ssize_t tun_put_user(struct tun_struct *tun, vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); len = min_t(int, skb->len + VLAN_HLEN, len); + total += VLAN_HLEN; copy = min_t(int, vlan_offset, len); - ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy); + ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); len -= copy; - total += copy; + copied += copy; if (ret || !len) goto done; copy = min_t(int, sizeof(veth), len); - ret = memcpy_toiovecend(iv, (void *)&veth, total, copy); + ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); len -= copy; - total += copy; + copied += copy; if (ret || !len) goto done; } - skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len); - total += len; + skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); done: tun->dev->stats.tx_packets++; @@ -1348,6 +1358,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, ret = tun_do_read(tun, tfile, iocb, iv, len, file->f_flags & O_NONBLOCK); ret = min_t(ssize_t, ret, len); + if (ret > 0) + iocb->ki_pos = ret; out: tun_put(tun); return ret; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f74786aa37be..e15ec2b12035 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -66,7 +66,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); static struct usb_driver cdc_ncm_driver; -static u8 cdc_ncm_setup(struct usbnet *dev) +static int cdc_ncm_setup(struct usbnet *dev) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; struct usb_cdc_ncm_ntb_parameters ncm_parm; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 
f3fce412c0c1..51073721e224 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -24,7 +24,7 @@ #include <linux/ipv6.h> /* Version Information */ -#define DRIVER_VERSION "v1.01.0 (2013/08/12)" +#define DRIVER_VERSION "v1.02.0 (2013/10/28)" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" #define MODULENAME "r8152" @@ -307,22 +307,22 @@ enum rtl8152_flags { #define MCU_TYPE_USB 0x0000 struct rx_desc { - u32 opts1; + __le32 opts1; #define RX_LEN_MASK 0x7fff - u32 opts2; - u32 opts3; - u32 opts4; - u32 opts5; - u32 opts6; + __le32 opts2; + __le32 opts3; + __le32 opts4; + __le32 opts5; + __le32 opts6; }; struct tx_desc { - u32 opts1; + __le32 opts1; #define TX_FS (1 << 31) /* First segment of a packet */ #define TX_LS (1 << 30) /* Final segment of a packet */ #define TX_LEN_MASK 0x3ffff - u32 opts2; + __le32 opts2; #define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ #define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ #define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ @@ -365,6 +365,7 @@ struct r8152 { struct mii_if_info mii; int intr_interval; u32 msg_enable; + u32 tx_qlen; u16 ocp_base; u8 *intr_buff; u8 version; @@ -876,7 +877,7 @@ static void write_bulk_callback(struct urb *urb) static void intr_callback(struct urb *urb) { struct r8152 *tp; - __u16 *d; + __le16 *d; int status = urb->status; int res; @@ -1136,14 +1137,14 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) { - u32 remain; + int remain; u8 *tx_data; tx_data = agg->head; agg->skb_num = agg->skb_len = 0; - remain = rx_buf_sz - sizeof(struct tx_desc); + remain = rx_buf_sz; - while (remain >= ETH_ZLEN) { + while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; struct sk_buff *skb; unsigned int len; @@ -1152,12 +1153,14 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) if (!skb) break; + remain -= sizeof(*tx_desc); len = skb->len; if (remain < len) { skb_queue_head(&tp->tx_queue, skb); break; } + tx_data = tx_agg_align(tx_data); tx_desc = (struct tx_desc *)tx_data; tx_data += sizeof(*tx_desc); @@ -1167,11 +1170,18 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) agg->skb_len += len; dev_kfree_skb_any(skb); - tx_data = tx_agg_align(tx_data + len); - remain = rx_buf_sz - sizeof(*tx_desc) - - (u32)((void *)tx_data - agg->head); + tx_data += len; + remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); } + netif_tx_lock(tp->netdev); + + if (netif_queue_stopped(tp->netdev) && + skb_queue_len(&tp->tx_queue) < tp->tx_qlen) + netif_wake_queue(tp->netdev); + + netif_tx_unlock(tp->netdev); + usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), agg->head, (int)(tx_data - (u8 *)agg->head), (usb_complete_t)write_bulk_callback, agg); @@ -1188,7 +1198,6 @@ static void rx_bottom(struct r8152 *tp) list_for_each_safe(cursor, next, &tp->rx_done) { struct rx_desc *rx_desc; struct rx_agg *agg; - unsigned pkt_len; int len_used = 0; struct urb *urb; u8 *rx_data; @@ -1204,17 +1213,22 @@ static void rx_bottom(struct r8152 *tp) rx_desc = agg->head; rx_data = agg->head; - pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; - len_used += sizeof(struct rx_desc) + pkt_len; + len_used += sizeof(struct rx_desc); - while (urb->actual_length >= len_used) { + while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; struct net_device_stats 
*stats; + unsigned int pkt_len; struct sk_buff *skb; + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; if (pkt_len < ETH_ZLEN) break; + len_used += pkt_len; + if (urb->actual_length < len_used) + break; + stats = rtl8152_get_stats(netdev); pkt_len -= 4; /* CRC */ @@ -1234,9 +1248,8 @@ static void rx_bottom(struct r8152 *tp) rx_data = rx_agg_align(rx_data + pkt_len + 4); rx_desc = (struct rx_desc *)rx_data; - pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; len_used = (int)(rx_data - (u8 *)agg->head); - len_used += sizeof(struct rx_desc) + pkt_len; + len_used += sizeof(struct rx_desc); } submit: @@ -1384,53 +1397,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - struct net_device_stats *stats = rtl8152_get_stats(netdev); - unsigned long flags; - struct tx_agg *agg = NULL; - struct tx_desc *tx_desc; - unsigned int len; - u8 *tx_data; - int res; skb_tx_timestamp(skb); - /* If tx_queue is not empty, it means at least one previous packt */ - /* is waiting for sending. Don't send current one before it. */ - if (skb_queue_empty(&tp->tx_queue)) - agg = r8152_get_tx_agg(tp); - - if (!agg) { - skb_queue_tail(&tp->tx_queue, skb); - return NETDEV_TX_OK; - } + skb_queue_tail(&tp->tx_queue, skb); - tx_desc = (struct tx_desc *)agg->head; - tx_data = agg->head + sizeof(*tx_desc); - agg->skb_num = agg->skb_len = 0; + if (list_empty(&tp->tx_free) && + skb_queue_len(&tp->tx_queue) > tp->tx_qlen) + netif_stop_queue(netdev); - len = skb->len; - r8152_tx_csum(tp, tx_desc, skb); - memcpy(tx_data, skb->data, len); - dev_kfree_skb_any(skb); - agg->skb_num++; - agg->skb_len += len; - usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), - agg->head, len + sizeof(*tx_desc), - (usb_complete_t)write_bulk_callback, agg); - res = usb_submit_urb(agg->urb, GFP_ATOMIC); - if (res) { - /* Can we get/handle EPIPE here? */ - if (res == -ENODEV) { - netif_device_detach(tp->netdev); - } else { - netif_warn(tp, tx_err, netdev, - "failed tx_urb %d\n", res); - stats->tx_dropped++; - spin_lock_irqsave(&tp->tx_lock, flags); - list_add_tail(&agg->list, &tp->tx_free); - spin_unlock_irqrestore(&tp->tx_lock, flags); - } - } + if (!list_empty(&tp->tx_free)) + tasklet_schedule(&tp->tl); return NETDEV_TX_OK; } @@ -1459,6 +1436,14 @@ static void rtl8152_nic_reset(struct r8152 *tp) } } +static void set_tx_qlen(struct r8152 *tp) +{ + struct net_device *netdev = tp->netdev; + + tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + + sizeof(struct tx_desc)); +} + static inline u8 rtl8152_get_speed(struct r8152 *tp) { return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); @@ -1470,6 +1455,7 @@ static int rtl8152_enable(struct r8152 *tp) int i, ret; u8 speed; + set_tx_qlen(tp); speed = rtl8152_get_speed(tp); if (speed & _10bps) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 90a429b7ebad..8494bb53ebdc 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -204,9 +204,6 @@ static void intr_complete (struct urb *urb) break; } - if (!netif_running (dev->net)) - return; - status = usb_submit_urb (urb, GFP_ATOMIC); if (status != 0) netif_err(dev, timer, dev->net, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cdc7c90a6a9e..d208f8604981 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -36,7 +36,10 @@ module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. 
*/ -#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \ + sizeof(struct virtio_net_hdr_mrg_rxbuf), \ + L1_CACHE_BYTES)) #define GOOD_COPY_LEN 128 #define VIRTNET_DRIVER_VERSION "1.0.0" @@ -296,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq, return skb; } -static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) +static struct sk_buff *receive_small(void *buf, unsigned int len) { - struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb); + struct sk_buff * skb = buf; + + len -= sizeof(struct virtio_net_hdr); + skb_trim(skb, len); + + return skb; +} + +static struct sk_buff *receive_big(struct net_device *dev, + struct receive_queue *rq, + void *buf, + unsigned int len) +{ + struct page *page = buf; + struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); + + if (unlikely(!skb)) + goto err; + + return skb; + +err: + dev->stats.rx_dropped++; + give_pages(rq, page); + return NULL; +} + +static struct sk_buff *receive_mergeable(struct net_device *dev, + struct receive_queue *rq, + void *buf, + unsigned int len) +{ + struct skb_vnet_hdr *hdr = buf; + int num_buf = hdr->mhdr.num_buffers; + struct page *page = virt_to_head_page(buf); + int offset = buf - page_address(page); + struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, + MERGE_BUFFER_LEN); struct sk_buff *curr_skb = head_skb; - char *buf; - struct page *page; - int num_buf, len, offset; - num_buf = hdr->mhdr.num_buffers; + if (unlikely(!curr_skb)) + goto err_skb; + while (--num_buf) { - int num_skb_frags = skb_shinfo(curr_skb)->nr_frags; + int num_skb_frags; + buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { - pr_debug("%s: rx error: %d buffers missing\n", - head_skb->dev->name, hdr->mhdr.num_buffers); - head_skb->dev->stats.rx_length_errors++; - return -EINVAL; + pr_debug("%s: rx error: %d buffers out of %d missing\n", + dev->name, num_buf, hdr->mhdr.num_buffers); + dev->stats.rx_length_errors++; + goto err_buf; } - if (unlikely(len > MAX_PACKET_LEN)) { + if (unlikely(len > MERGE_BUFFER_LEN)) { pr_debug("%s: rx error: merge buffer too long\n", - head_skb->dev->name); - len = MAX_PACKET_LEN; + dev->name); + len = MERGE_BUFFER_LEN; } + + page = virt_to_head_page(buf); + --rq->num; + + num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); - if (unlikely(!nskb)) { - head_skb->dev->stats.rx_dropped++; - return -ENOMEM; - } + + if (unlikely(!nskb)) + goto err_skb; if (curr_skb == head_skb) skb_shinfo(curr_skb)->frag_list = nskb; else @@ -336,22 +380,39 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) if (curr_skb != head_skb) { head_skb->data_len += len; head_skb->len += len; - head_skb->truesize += MAX_PACKET_LEN; + head_skb->truesize += MERGE_BUFFER_LEN; } - page = virt_to_head_page(buf); - offset = buf - (char *)page_address(page); + offset = buf - page_address(page); if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { put_page(page); skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, - len, MAX_PACKET_LEN); + len, MERGE_BUFFER_LEN); } else { skb_add_rx_frag(curr_skb, num_skb_frags, page, - offset, len, - MAX_PACKET_LEN); + offset, len, MERGE_BUFFER_LEN); } + } + + return head_skb; + +err_skb: + put_page(page); + while (--num_buf) { + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { + 
pr_debug("%s: rx error: %d buffers missing\n", + dev->name, num_buf); + dev->stats.rx_length_errors++; + break; + } + page = virt_to_head_page(buf); + put_page(page); --rq->num; } - return 0; +err_buf: + dev->stats.rx_dropped++; + dev_kfree_skb(head_skb); + return NULL; } static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) @@ -360,48 +421,29 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) struct net_device *dev = vi->dev; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; - struct page *page; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; - if (vi->big_packets) - give_pages(rq, buf); - else if (vi->mergeable_rx_bufs) + if (vi->mergeable_rx_bufs) put_page(virt_to_head_page(buf)); + else if (vi->big_packets) + give_pages(rq, buf); else dev_kfree_skb(buf); return; } - if (!vi->mergeable_rx_bufs && !vi->big_packets) { - skb = buf; - len -= sizeof(struct virtio_net_hdr); - skb_trim(skb, len); - } else if (vi->mergeable_rx_bufs) { - struct page *page = virt_to_head_page(buf); - skb = page_to_skb(rq, page, - (char *)buf - (char *)page_address(page), - len, MAX_PACKET_LEN); - if (unlikely(!skb)) { - dev->stats.rx_dropped++; - put_page(page); - return; - } - if (receive_mergeable(rq, skb)) { - dev_kfree_skb(skb); - return; - } - } else { - page = buf; - skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); - if (unlikely(!skb)) { - dev->stats.rx_dropped++; - give_pages(rq, page); - return; - } - } + if (vi->mergeable_rx_bufs) + skb = receive_mergeable(dev, rq, buf, len); + else if (vi->big_packets) + skb = receive_big(dev, rq, buf, len); + else + skb = receive_small(buf, len); + + if (unlikely(!skb)) + return; hdr = skb_vnet_hdr(skb); @@ -471,11 +513,11 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) struct skb_vnet_hdr *hdr; int err; - skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); + skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); if (unlikely(!skb)) return -ENOMEM; - skb_put(skb, MAX_PACKET_LEN); + skb_put(skb, GOOD_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); @@ -542,20 +584,20 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) int err; if (gfp & __GFP_WAIT) { - if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag, + if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag, gfp)) { buf = (char *)page_address(vi->alloc_frag.page) + vi->alloc_frag.offset; get_page(vi->alloc_frag.page); - vi->alloc_frag.offset += MAX_PACKET_LEN; + vi->alloc_frag.offset += MERGE_BUFFER_LEN; } } else { - buf = netdev_alloc_frag(MAX_PACKET_LEN); + buf = netdev_alloc_frag(MERGE_BUFFER_LEN); } if (!buf) return -ENOMEM; - sg_init_one(rq->sg, buf, MAX_PACKET_LEN); + sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1082,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL)) - dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); + dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); kfree(buf); } @@ -1325,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev) static void virtnet_free_queues(struct virtnet_info *vi) { + int i; + + for (i = 0; i < 
vi->max_queue_pairs; i++) + netif_napi_del(&vi->rq[i].napi); + kfree(vi->rq); kfree(vi->sq); } @@ -1354,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi) struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (vi->big_packets) - give_pages(&vi->rq[i], buf); - else if (vi->mergeable_rx_bufs) + if (vi->mergeable_rx_bufs) put_page(virt_to_head_page(buf)); + else if (vi->big_packets) + give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; @@ -1619,8 +1666,8 @@ static int virtnet_probe(struct virtio_device *vdev) if (err) goto free_stats; - netif_set_real_num_tx_queues(dev, 1); - netif_set_real_num_rx_queues(dev, 1); + netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); + netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); err = register_netdev(dev); if (err) { diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0358c07f7669..249e01c5600c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1668,7 +1668,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, netdev_dbg(dev, "circular route to %pI4\n", &dst->sin.sin_addr.s_addr); dev->stats.collisions++; - goto tx_error; + goto rt_tx_error; } /* Bypass encapsulation if the destination is local */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 1ec52356b5a1..130657db5c43 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3984,18 +3984,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq) int quick_drop; s32 t[3], f[3] = {5180, 5500, 5785}; - if (!(pBase->miscConfiguration & BIT(1))) + if (!(pBase->miscConfiguration & BIT(4))) return; - if (freq < 4000) - quick_drop = eep->modalHeader2G.quick_drop; - else { - t[0] = eep->base_ext1.quick_drop_low; - t[1] = eep->modalHeader5G.quick_drop; - t[2] = eep->base_ext1.quick_drop_high; - quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); + if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) { + if (freq < 4000) { + quick_drop = eep->modalHeader2G.quick_drop; + } else { + t[0] = eep->base_ext1.quick_drop_low; + t[1] = eep->modalHeader5G.quick_drop; + t[2] = eep->base_ext1.quick_drop_high; + quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3); + } + REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); } - REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop); } static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz) @@ -4035,7 +4037,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz) struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; u8 bias; - if (!(eep->baseEepHeader.featureEnable & 0x40)) + if (!(eep->baseEepHeader.miscConfiguration & 0x40)) return; if (!AR_SREV_9300(ah)) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index b07f164d65cf..20e49095db2a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -187,17 +187,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniCckfirJapan2484, ar9485_1_1_baseband_core_txfir_coeff_japan_2484); - /* Load PCIE SERDES settings from INI */ - - /* Awake Setting */ - - INIT_INI_ARRAY(&ah->iniPcieSerdes, - ar9485_1_1_pcie_phy_clkreq_disable_L1); - - /* Sleep Setting */ - - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, - ar9485_1_1_pcie_phy_clkreq_disable_L1); + if 
(ah->config.no_pll_pwrsave) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pcie_phy_clkreq_disable_L1); + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9485_1_1_pcie_phy_clkreq_disable_L1); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); + } } else if (AR_SREV_9462_21(ah)) { INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p1_mac_core); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 11f53589a3f3..d39b79f5e841 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -701,6 +701,54 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, return ret; } +static void ar9003_doubler_fix(struct ath_hw *ah) +{ + if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) { + REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0); + REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0); + REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0); + + udelay(200); + + REG_CLR_BIT(ah, AR_PHY_65NM_CH0_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK); + REG_CLR_BIT(ah, AR_PHY_65NM_CH1_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK); + REG_CLR_BIT(ah, AR_PHY_65NM_CH2_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK); + + udelay(1); + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1); + + udelay(200); + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH12, + AR_PHY_65NM_CH0_SYNTH12_VREFMUL3, 0xf); + + REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, 0, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S); + REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, 0, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S); + REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, 0, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S); + } +} + static int ar9003_hw_process_ini(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -726,6 +774,8 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, modesIndex); } + ar9003_doubler_fix(ah); + /* * RXGAIN initvals. */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index fca624322dc8..2af667beb273 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -656,13 +656,24 @@ #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002) #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 
0 : 1) #define AR_PHY_65NM_CH0_SYNTH7 0x16098 +#define AR_PHY_65NM_CH0_SYNTH12 0x160ac #define AR_PHY_65NM_CH0_BIAS1 0x160c0 #define AR_PHY_65NM_CH0_BIAS2 0x160c4 #define AR_PHY_65NM_CH0_BIAS4 0x160cc +#define AR_PHY_65NM_CH0_RXTX2 0x16104 +#define AR_PHY_65NM_CH1_RXTX2 0x16504 +#define AR_PHY_65NM_CH2_RXTX2 0x16904 #define AR_PHY_65NM_CH0_RXTX4 0x1610c #define AR_PHY_65NM_CH1_RXTX4 0x1650c #define AR_PHY_65NM_CH2_RXTX4 0x1690c +#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000 +#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S 2 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3 + #define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280))) #define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300) diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h index 4dbc294df7e3..57fc5f459d0a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h @@ -361,7 +361,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = { {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, @@ -400,7 +400,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = { {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, }; @@ -472,7 +472,7 @@ static const u32 ar9462_2p1_radio_postamble[][5] = { static const u32 ar9462_2p1_soc_preamble[][2] = { /* Addr allmodes */ - {0x000040a4, 0x00a0c1c9}, + {0x000040a4, 0x00a0c9c9}, {0x00007020, 0x00000000}, {0x00007034, 0x00000002}, {0x00007038, 0x000004c2}, diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 6f899c692647..7c1845221e1c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -32,13 +32,6 @@ static const u32 ar9485_1_1_mac_postamble[][5] = { {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, }; -static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18012e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { /* Addr allmodes */ {0x00009e00, 0x037216a0}, @@ -1101,20 +1094,6 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = { {0x0000a1fc, 0x00000296}, }; -static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18052e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - -static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { - /* 
Addr allmodes */ - {0x00018c00, 0x18053e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485_1_1_soc_preamble[][2] = { /* Addr allmodes */ {0x00004014, 0xba280400}, @@ -1173,13 +1152,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, }; -static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00018c00, 0x18013e5e}, - {0x00018c04, 0x000801d8}, - {0x00018c08, 0x0000080c}, -}; - static const u32 ar9485_1_1_radio_postamble[][2] = { /* Addr allmodes */ {0x0001609c, 0x0b283f31}, @@ -1358,4 +1330,18 @@ static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = { {0x0000a3a0, 0xca9228ee}, }; +static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x18013e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x1801265e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + #endif /* INITVALS_9485_H */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index e7a38d844a6a..60a5da53668f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -632,15 +632,16 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); /* Main driver core */ /********************/ -#define ATH9K_PCI_CUS198 0x0001 -#define ATH9K_PCI_CUS230 0x0002 -#define ATH9K_PCI_CUS217 0x0004 -#define ATH9K_PCI_CUS252 0x0008 -#define ATH9K_PCI_WOW 0x0010 -#define ATH9K_PCI_BT_ANT_DIV 0x0020 -#define ATH9K_PCI_D3_L1_WAR 0x0040 -#define ATH9K_PCI_AR9565_1ANT 0x0080 -#define ATH9K_PCI_AR9565_2ANT 0x0100 +#define ATH9K_PCI_CUS198 0x0001 +#define ATH9K_PCI_CUS230 0x0002 +#define ATH9K_PCI_CUS217 0x0004 +#define ATH9K_PCI_CUS252 0x0008 +#define ATH9K_PCI_WOW 0x0010 +#define ATH9K_PCI_BT_ANT_DIV 0x0020 +#define ATH9K_PCI_D3_L1_WAR 0x0040 +#define ATH9K_PCI_AR9565_1ANT 0x0080 +#define ATH9K_PCI_AR9565_2ANT 0x0100 +#define ATH9K_PCI_NO_PLL_PWRSAVE 0x0200 /* * Default cache line size, in bytes. diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 90b8342d1ed4..8824610c21fb 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -44,14 +44,20 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, if (buf == NULL) return -ENOMEM; - if (sc->dfs_detector) - dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); - len += scnprintf(buf + len, size - len, "DFS support for " "macVersion = 0x%x, macRev = 0x%x: %s\n", hw_ver->macVersion, hw_ver->macRev, (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? "enabled" : "disabled"); + + if (!sc->dfs_detector) { + len += scnprintf(buf + len, size - len, + "DFS detector not enabled\n"); + goto exit; + } + + dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector); + len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); ATH9K_DFS_STAT("pulse events reported ", pulses_total); ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs); @@ -76,6 +82,7 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf, ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error); ATH9K_DFS_POOL_STAT("Seqs. 
in use ", pseq_used); +exit: if (len > size) len = size; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 54b04155e43b..8918035da3a3 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -146,10 +146,9 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah) else clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; - if (IS_CHAN_HT40(chan)) - clockrate *= 2; - - if (ah->curchan) { + if (chan) { + if (IS_CHAN_HT40(chan)) + clockrate *= 2; if (IS_CHAN_HALF_RATE(chan)) clockrate /= 2; if (IS_CHAN_QUARTER_RATE(chan)) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9ea24f1cba73..a2c9a5dbac6b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -316,6 +316,7 @@ struct ath9k_ops_config { u32 ant_ctrl_comm2g_switch_enable; bool xatten_margin_cfg; bool alt_mingainidx; + bool no_pll_pwrsave; }; enum ath9k_int { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d8643ebabd30..710192ed27ed 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -609,6 +609,11 @@ static void ath9k_init_platform(struct ath_softc *sc) ah->config.pcie_waen = 0x0040473b; ath_info(common, "Enable WAR for ASPM D3/L1\n"); } + + if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) { + ah->config.no_pll_pwrsave = true; + ath_info(common, "Disable PLL PowerSave\n"); + } } static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, @@ -863,8 +868,8 @@ static const struct ieee80211_iface_combination if_comb[] = { .max_interfaces = 1, .num_different_channels = 1, .beacon_int_infra_match = true, - .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) | - BIT(NL80211_CHAN_HT20), + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20), } }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7e4c2524b630..b5656fce4ff5 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -195,6 +195,93 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 0x3219), .driver_data = ATH9K_PCI_BT_ANT_DIV }, + /* AR9485 cards with PLL power-save disabled by default. 
*/ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2C97), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x2100), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1C56, /* ASKEY */ + 0x4001), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x11AD, /* LITEON */ + 0x6627), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x11AD, /* LITEON */ + 0x6628), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE04E), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_FOXCONN, + 0xE04F), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x144F, /* ASKEY */ + 0x7197), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x2000), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x2001), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1186), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1F86), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1195), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_AZWAVE, + 0x1F95), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x1C00), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + 0x1B9A, /* XAVI */ + 0x1C01), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0032, + PCI_VENDOR_ID_ASUSTEK, + 0x850D), + .driver_data = ATH9K_PCI_NO_PLL_PWRSAVE }, + { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 09cdbcd09739..b5a19e098f2d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1276,6 +1276,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, if (!rts_thresh || (len > rts_thresh)) rts = true; } + + if (!aggr) + len = fi->framelen; + ath_buf_set_rate(sc, bf, &info, len, rts); } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index c00687e05688..1217c52ab28e 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -362,7 +362,8 @@ static int __ath_reg_dyn_country(struct wiphy *wiphy, { u16 country_code; - if (!ath_is_world_regd(reg)) + if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + !ath_is_world_regd(reg)) return -EINVAL; country_code = ath_regd_find_country_by_name(request->alpha2); diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c index 5b84f7ae0b1e..ef44a2da644d 100644 --- a/drivers/net/wireless/ath/wcn36xx/debug.c +++ b/drivers/net/wireless/ath/wcn36xx/debug.c @@ -126,7 +126,7 @@ static ssize_t 
write_file_dump(struct file *file, if (begin == NULL) break; - if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0) + if (kstrtou32(begin, 0, &arg[i]) != 0) break; } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index f8c3a10510c2..366339421d4f 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -1286,7 +1286,8 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, } else { wcn36xx_err("Beacon is to big: beacon size=%d\n", msg_body.beacon_length); - return -ENOMEM; + ret = -ENOMEM; + goto out; } memcpy(msg_body.bssid, vif->addr, ETH_ALEN); @@ -1327,7 +1328,8 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, if (skb->len > BEACON_TEMPLATE_SIZE) { wcn36xx_warn("probe response template is too big: %d\n", skb->len); - return -E2BIG; + ret = -E2BIG; + goto out; } msg.probe_resp_template_len = skb->len; @@ -1606,7 +1608,8 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn, /* TODO: it also support ARP response type */ } else { wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); - return -EINVAL; + ret = -EINVAL; + goto out; } PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2038,13 +2041,20 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: mutex_lock(&wcn->hal_ind_mutex); msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); - msg_ind->msg_len = len; - msg_ind->msg = kmalloc(len, GFP_KERNEL); - memcpy(msg_ind->msg, buf, len); - list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); - queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); - wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); + if (msg_ind) { + msg_ind->msg_len = len; + msg_ind->msg = kmalloc(len, GFP_KERNEL); + memcpy(msg_ind->msg, buf, len); + list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); + queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); + wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); + } mutex_unlock(&wcn->hal_ind_mutex); + if (msg_ind) + break; + /* FIXME: Do something smarter then just printing an error. 
*/ + wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", + msg_header->msg_type); break; default: wcn36xx_err("SMD_EVENT (%d) not supported\n", diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b00a7e92225f..54e36fcb3954 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -5,6 +5,8 @@ config BRCMSMAC tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" depends on MAC80211 depends on BCMA + select NEW_LEDS if BCMA_DRIVER_GPIO + select LEDS_CLASS if BCMA_DRIVER_GPIO select BRCMUTIL select FW_LOADER select CRC_CCITT diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 905704e335d7..abc9ceca70f3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -109,6 +109,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev, brcmf_err("Disable F2 failed:%d\n", err_ret); } + } else { + err_ret = -ENOENT; } } else if ((regaddr == SDIO_CCCR_ABORT) || (regaddr == SDIO_CCCR_IENx)) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 5b5b952d47b1..4a2293041821 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -823,6 +823,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, } err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state, action, P2PAPI_BSSCFG_DEVICE); + kfree(chanspecs); } exit: if (err) diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 85879dbaa402..3c34a72a5d64 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -67,8 +67,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 7 -#define IWL3160_UCODE_API_MAX 7 +#define IWL7260_UCODE_API_MAX 8 +#define IWL3160_UCODE_API_MAX 8 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 7 @@ -130,6 +130,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { @@ -140,6 +141,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, .high_temp = true, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7260_2n_cfg = { @@ -149,6 +151,7 @@ const struct iwl_cfg iwl7260_2n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7260_n_cfg = { @@ -158,6 +161,7 @@ const struct iwl_cfg iwl7260_n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .nvm_calib_ver = IWL7260_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl3160_2ac_cfg = { @@ -167,6 +171,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl3160_2n_cfg = { @@ -176,6 +181,7 @@ const struct iwl_cfg iwl3160_2n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, 
.nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl3160_n_cfg = { @@ -185,6 +191,7 @@ const struct iwl_cfg iwl3160_n_cfg = { .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .nvm_calib_ver = IWL3160_TX_POWER_VERSION, + .host_interrupt_operation_mode = true, }; const struct iwl_cfg iwl7265_2ac_cfg = { @@ -196,5 +203,23 @@ const struct iwl_cfg iwl7265_2ac_cfg = { .nvm_calib_ver = IWL7265_TX_POWER_VERSION, }; +const struct iwl_cfg iwl7265_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl7265_n_cfg = { + .name = "Intel(R) Wireless N 7265", + .fw_name_pre = IWL7265_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7265_NVM_VERSION, + .nvm_calib_ver = IWL7265_TX_POWER_VERSION, +}; + MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 18f232e8e812..03fd9aa8bfda 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -207,6 +207,8 @@ struct iwl_eeprom_params { * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @high_temp: Is this NIC is designated to be in high temperature. + * @host_interrupt_operation_mode: device needs host interrupt operation + * mode set * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -235,6 +237,7 @@ struct iwl_cfg { enum iwl_led_mode led_mode; const bool rx_with_siso_diversity; const bool internal_wimax_coex; + const bool host_interrupt_operation_mode; bool high_temp; }; @@ -294,6 +297,8 @@ extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; +extern const struct iwl_cfg iwl7265_2n_cfg; +extern const struct iwl_cfg iwl7265_n_cfg; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 54a4fdc631b7..da4eca8b3007 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -495,14 +495,11 @@ enum secure_load_status_reg { * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit * * default interrupt coalescing timer is 64 x 32 = 2048 usecs - * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs */ #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) #define IWL_HOST_INT_TIMEOUT_DEF (0x40) #define IWL_HOST_INT_TIMEOUT_MIN (0x0) -#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) -#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) -#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_OPER_MODE BIT(31) /***************************************************************************** * 7000/3000 series SHR DTS addresses * diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 5d066cbc5ac7..75b72a956552 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -391,7 +391,6 @@ int iwl_send_bt_init_conf(struct 
iwl_mvm *mvm) BT_VALID_LUT | BT_VALID_WIFI_RX_SW_PRIO_BOOST | BT_VALID_WIFI_TX_SW_PRIO_BOOST | - BT_VALID_MULTI_PRIO_LUT | BT_VALID_CORUN_LUT_20 | BT_VALID_CORUN_LUT_40 | BT_VALID_ANT_ISOLATION | @@ -842,6 +841,11 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], lockdep_is_held(&mvm->mutex)); + + /* This can happen if the station has been removed right now */ + if (IS_ERR_OR_NULL(sta)) + return; + mvmsta = (void *)sta->drv_priv; data->num_bss_ifaces++; diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 6f45966817bb..b9b81e881dd0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -895,7 +895,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, /* new API returns next, not last-used seqno */ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) - err -= 0x10; + err = (u16) (err - 0x10); } iwl_free_resp(&cmd); @@ -1549,7 +1549,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, if (gtkdata.unhandled_cipher) return false; if (!gtkdata.num_keys) - return true; + goto out; if (!gtkdata.last_gtk) return false; @@ -1600,6 +1600,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, (void *)&replay_ctr, GFP_KERNEL); } +out: mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 9864d713eb2c..a8fe6b41f9a3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file, if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) return -EINVAL; + if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT) + return -EINVAL; + if (drain < 0 || drain > 1) + return -EINVAL; mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 33cf56fdfc41..95ce4b601fef 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -176,8 +176,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, * P2P Device discoveribility, while there are other higher priority * events in the system). */ - if (WARN_ONCE(!le32_to_cpu(notif->status), - "Failed to schedule time event\n")) { + if (!le32_to_cpu(notif->status)) { + bool start = le32_to_cpu(notif->action) & + TE_V2_NOTIF_HOST_EVENT_START; + IWL_WARN(mvm, "Time Event %s notification failure\n", + start ? 
"start" : "end"); if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { iwl_mvm_te_clear_data(mvm, te_data); return; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 941c0c88f982..86605027c41d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -353,6 +353,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index fa22639b63c9..051268c037b1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -477,4 +477,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); } +static inline void iwl_nic_error(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + set_bit(STATUS_FW_ERROR, &trans_pcie->status); + iwl_op_mode_nic_error(trans->op_mode); +} + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 3f237b42eb36..be3995afa9d0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + /* W/A for interrupt coalescing bug in 7260 and 3160 */ + if (trans->cfg->host_interrupt_operation_mode) + iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) @@ -796,12 +800,13 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) iwl_pcie_dump_csr(trans); iwl_dump_fh(trans, NULL); + /* set the ERROR bit before we wake up the caller */ set_bit(STATUS_FW_ERROR, &trans_pcie->status); clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); wake_up(&trans_pcie->wait_command_queue); local_bh_disable(); - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); local_bh_enable(); } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 5d9337bec67a..cde9c16f6e4f 100644 --- 
a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -279,9 +279,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_pcie_apm_init(trans); - /* Set interrupt coalescing calibration timer to default (512 usecs) */ - iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); - spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); iwl_pcie_set_pwr(trans, false); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 059c5acad3a0..0adde919a258 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) IWL_ERR(trans, "scratch %d = 0x%08x\n", i, le32_to_cpu(txq->scratchbufs[i].scratch)); - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); } /* @@ -1023,7 +1023,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, q->write_ptr, q->read_ptr); - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); } } @@ -1562,7 +1562,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, get_cmd_string(trans_pcie, cmd->id)); ret = -ETIMEDOUT; - iwl_op_mode_nic_error(trans->op_mode); + iwl_nic_error(trans); goto cancel; } diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 668dd27616a0..cc6a0a586f0b 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -913,7 +913,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, char *p2; struct debug_data *d = f->private_data; - pdata = kmalloc(cnt, GFP_KERNEL); + if (cnt == 0) + return 0; + + pdata = kmalloc(cnt + 1, GFP_KERNEL); if (pdata == NULL) return 0; @@ -922,6 +925,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, kfree(pdata); return 0; } + pdata[cnt] = '\0'; p0 = pdata; for (i = 0; i < num_of_items; i++) { diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index ef8c98e21098..f499efc6abcf 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -902,6 +902,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev) if (card->model == MODEL_UNKNOWN) { pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", p_dev->manf_id, p_dev->card_id); + ret = -ENODEV; goto out2; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index de0df86704e7..c72438bb2faf 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -383,6 +383,14 @@ struct hwsim_radiotap_hdr { __le16 rt_chbitmask; } __packed; +struct hwsim_radiotap_ack_hdr { + struct ieee80211_radiotap_header hdr; + u8 rt_flags; + u8 pad; + __le16 rt_channel; + __le16 rt_chbitmask; +} __packed; + /* MAC80211_HWSIM netlinf family */ static struct genl_family hwsim_genl_family = { .id = GENL_ID_GENERATE, @@ -500,7 +508,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, const u8 *addr) { struct sk_buff *skb; - struct hwsim_radiotap_hdr *hdr; + struct hwsim_radiotap_ack_hdr *hdr; u16 flags; struct ieee80211_hdr *hdr11; @@ -511,14 +519,14 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, if (skb == NULL) return; - hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, 
sizeof(*hdr)); + hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_flags = 0; - hdr->rt_rate = 0; + hdr->pad = 0; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; hdr->rt_chbitmask = cpu_to_le16(flags); @@ -1230,7 +1238,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, HRTIMER_MODE_REL); } else if (!info->enable_beacon) { unsigned int count = 0; - ieee80211_iterate_active_interfaces( + ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_bcn_en_iter, &count); wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", @@ -2097,7 +2105,7 @@ out: } /* Generic Netlink operations array */ -static struct genl_ops hwsim_ops[] = { +static const struct genl_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, .policy = hwsim_genl_policy, @@ -2148,8 +2156,7 @@ static int hwsim_init_netlink(void) printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); - rc = genl_register_family_with_ops(&hwsim_genl_family, - hwsim_ops, ARRAY_SIZE(hwsim_ops)); + rc = genl_register_family_with_ops(&hwsim_genl_family, hwsim_ops); if (rc) goto failure; diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index fbad00a5abc8..aeaea0e3b4c4 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2210,8 +2210,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->bss_started = 0; priv->bss_num = 0; - if (mwifiex_cfg80211_init_p2p_client(priv)) - return ERR_PTR(-EFAULT); + if (mwifiex_cfg80211_init_p2p_client(priv)) { + wdev = ERR_PTR(-EFAULT); + goto done; + } break; default: @@ -2224,7 +2226,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, if (!dev) { wiphy_err(wiphy, "no memory available for netdevice\n"); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-ENOMEM); + wdev = ERR_PTR(-ENOMEM); + goto done; } mwifiex_init_priv_params(priv, dev); @@ -2264,7 +2267,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, wiphy_err(wiphy, "cannot register virtual network device\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-EFAULT); + priv->netdev = NULL; + wdev = ERR_PTR(-EFAULT); + goto done; } sema_init(&priv->async_sem, 1); @@ -2274,6 +2279,13 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_init(priv); #endif + +done: + if (IS_ERR(wdev)) { + kfree(priv->wdev); + priv->wdev = NULL; + } + return wdev; } EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); @@ -2298,7 +2310,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) unregister_netdevice(wdev->netdev); /* Clear the priv in adapter */ + priv->netdev->ieee80211_ptr = NULL; priv->netdev = NULL; + kfree(wdev); + priv->wdev = NULL; priv->media_connected = false; diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index f80f30b6160e..c8385ec77a86 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -1020,8 +1020,8 @@ struct mwifiex_power_group { } __packed; struct mwifiex_types_power_group { - u16 type; - u16 length; + __le16 type; + __le16 length; } __packed; struct host_cmd_ds_txpwr_cfg { 
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c index 220af4fe0fc6..81ac001ee741 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/mwifiex/ie.c @@ -82,7 +82,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, struct mwifiex_ie_list *ie_list) { u16 travel_len, index, mask; - s16 input_len; + s16 input_len, tlv_len; struct mwifiex_ie *ie; u8 *tmp; @@ -91,11 +91,13 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, ie_list->len = 0; - while (input_len > 0) { + while (input_len >= sizeof(struct mwifiex_ie_types_header)) { ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len); - input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; - travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE; + tlv_len = le16_to_cpu(ie->ie_length); + travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE; + if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE) + return -1; index = le16_to_cpu(ie->ie_index); mask = le16_to_cpu(ie->mgmt_subtype_mask); @@ -132,6 +134,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, le16_add_cpu(&ie_list->len, le16_to_cpu(priv->mgmt_ie[index].ie_length) + MWIFIEX_IE_HDR_SIZE); + input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE; } if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 9d7c9d354d34..78e8a6666cc6 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -411,13 +411,14 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) */ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) { - int ret, i; + int ret; char fmt[64]; struct mwifiex_private *priv; struct mwifiex_adapter *adapter = context; struct mwifiex_fw_image fw; struct semaphore *sem = adapter->card_sem; bool init_failed = false; + struct wireless_dev *wdev; if (!firmware) { dev_err(adapter->dev, @@ -469,14 +470,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; if (mwifiex_register_cfg80211(adapter)) { dev_err(adapter->dev, "cannot register with cfg80211\n"); - goto err_register_cfg80211; + goto err_init_fw; } rtnl_lock(); /* Create station interface by default */ - if (!mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", - NL80211_IFTYPE_STATION, NULL, NULL)) { + wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", + NL80211_IFTYPE_STATION, NULL, NULL); + if (IS_ERR(wdev)) { dev_err(adapter->dev, "cannot create default STA interface\n"); + rtnl_unlock(); goto err_add_intf; } rtnl_unlock(); @@ -486,17 +489,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) goto done; err_add_intf: - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - - if (!priv) - continue; - - if (priv->wdev && priv->netdev) - mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); - } - rtnl_unlock(); -err_register_cfg80211: wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: @@ -1006,12 +998,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) wiphy_unregister(priv->wdev->wiphy); wiphy_free(priv->wdev->wiphy); - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (priv) - kfree(priv->wdev); - } - mwifiex_terminate_workqueue(adapter); /* Unregister device */ diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 33fa9432b241..03688aa14e8a 100644 --- 
a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -232,7 +232,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) } mwifiex_remove_card(card->adapter, &add_remove_card_sem); - kfree(card); } static void mwifiex_pcie_shutdown(struct pci_dev *pdev) @@ -2313,6 +2312,7 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) pci_release_region(pdev, 0); pci_set_drvdata(pdev, NULL); } + kfree(card); } /* diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 1576104e3d95..b44a31523461 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -196,7 +196,6 @@ mwifiex_sdio_remove(struct sdio_func *func) } mwifiex_remove_card(card->adapter, &add_remove_card_sem); - kfree(card); } /* @@ -1029,7 +1028,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb, u32 upld_typ) { u8 *cmd_buf; + __le16 *curr_ptr = (__le16 *)skb->data; + u16 pkt_len = le16_to_cpu(*curr_ptr); + skb_trim(skb, pkt_len); skb_pull(skb, INTF_HEADER_LEN); switch (upld_typ) { @@ -1742,7 +1744,6 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter) sdio_claim_host(card->func); sdio_disable_func(card->func); sdio_release_host(card->func); - sdio_set_drvdata(card->func, NULL); } } @@ -1770,7 +1771,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return ret; } - sdio_set_drvdata(func, card); adapter->dev = &func->dev; @@ -1798,6 +1798,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) int ret; u8 sdio_ireg; + sdio_set_drvdata(card->func, card); + /* * Read the HOST_INT_STATUS_REG for ACK the first interrupt got * from the bootloader. If we don't do this we get a interrupt @@ -1880,6 +1882,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); + sdio_set_drvdata(card->func, NULL); + kfree(card); } /* diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 7d66018a2e33..2181ee283d82 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -239,14 +239,14 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd, memmove(cmd_txp_cfg, txp, sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group) + - pg_tlv->length); + le16_to_cpu(pg_tlv->length)); pg_tlv = (struct mwifiex_types_power_group *) ((u8 *) cmd_txp_cfg + sizeof(struct host_cmd_ds_txpwr_cfg)); cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) + sizeof(struct mwifiex_types_power_group) + - pg_tlv->length); + le16_to_cpu(pg_tlv->length)); } else { memmove(cmd_txp_cfg, txp, sizeof(*txp)); } diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 58a6013712d2..2675ca7f8d14 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -274,17 +274,20 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg; struct mwifiex_rate_scope *rate_scope; struct mwifiex_ie_types_header *head; - u16 tlv, tlv_buf_len; + u16 tlv, tlv_buf_len, tlv_buf_left; u8 *tlv_buf; u32 i; - tlv_buf = ((u8 *)rate_cfg) + - sizeof(struct host_cmd_ds_tx_rate_cfg); - tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16))); + tlv_buf = ((u8 *)rate_cfg) + sizeof(struct host_cmd_ds_tx_rate_cfg); + tlv_buf_left = 
le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*rate_cfg); - while (tlv_buf && tlv_buf_len > 0) { - tlv = (*tlv_buf); - tlv = tlv | (*(tlv_buf + 1) << 8); + while (tlv_buf_left >= sizeof(*head)) { + head = (struct mwifiex_ie_types_header *)tlv_buf; + tlv = le16_to_cpu(head->type); + tlv_buf_len = le16_to_cpu(head->len); + + if (tlv_buf_left < (sizeof(*head) + tlv_buf_len)) + break; switch (tlv) { case TLV_TYPE_RATE_SCOPE: @@ -304,9 +307,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, /* Add RATE_DROP tlv here */ } - head = (struct mwifiex_ie_types_header *) tlv_buf; - tlv_buf += le16_to_cpu(head->len) + sizeof(*head); - tlv_buf_len -= le16_to_cpu(head->len); + tlv_buf += (sizeof(*head) + tlv_buf_len); + tlv_buf_left -= (sizeof(*head) + tlv_buf_len); } priv->is_data_rate_auto = mwifiex_is_rate_auto(priv); @@ -340,13 +342,17 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) ((u8 *) data_buf + sizeof(struct host_cmd_ds_txpwr_cfg)); pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr + sizeof(struct mwifiex_types_power_group)); - length = pg_tlv_hdr->length; - if (length > 0) { - max_power = pg->power_max; - min_power = pg->power_min; - length -= sizeof(struct mwifiex_power_group); - } - while (length) { + length = le16_to_cpu(pg_tlv_hdr->length); + + /* At least one structure required to update power */ + if (length < sizeof(struct mwifiex_power_group)) + return 0; + + max_power = pg->power_max; + min_power = pg->power_min; + length -= sizeof(struct mwifiex_power_group); + + while (length >= sizeof(struct mwifiex_power_group)) { pg++; if (max_power < pg->power_max) max_power = pg->power_max; @@ -356,10 +362,8 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf) length -= sizeof(struct mwifiex_power_group); } - if (pg_tlv_hdr->length > 0) { - priv->min_tx_power_level = (u8) min_power; - priv->max_tx_power_level = (u8) max_power; - } + priv->min_tx_power_level = (u8) min_power; + priv->max_tx_power_level = (u8) max_power; return 0; } diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index f084412eee0b..a09398fe9e2a 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, if (bss_desc && bss_desc->ssid.ssid_len && (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. 
ssid, &bss_desc->ssid))) { - kfree(bss_desc); - return 0; + ret = 0; + goto done; } /* Exit Adhoc mode first */ @@ -638,8 +638,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); - pg_tlv->type = TLV_TYPE_POWER_GROUP; - pg_tlv->length = 4 * sizeof(struct mwifiex_power_group); + pg_tlv->type = cpu_to_le16(TLV_TYPE_POWER_GROUP); + pg_tlv->length = + cpu_to_le16(4 * sizeof(struct mwifiex_power_group)); pg = (struct mwifiex_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group)); diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 1cfe5a738c47..92f76d655e6c 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -97,6 +97,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, struct mwifiex_txinfo *tx_info; int hdr_chop; struct timeval tv; + struct ethhdr *p_ethhdr; u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; uap_rx_pd = (struct uap_rxpd *)(skb->data); @@ -112,14 +113,36 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, } if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, - rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) + rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { + /* Replace the 803 header and rfc1042 header (llc/snap) with + * an Ethernet II header, keep the src/dst and snap_type + * (ethertype). + * + * The firmware only passes up SNAP frames converting all RX + * data from 802.11 to 802.2/LLC/SNAP frames. + * + * To create the Ethernet II, just move the src, dst address + * right before the snap_type. + */ + p_ethhdr = (struct ethhdr *) + ((u8 *)(&rx_pkt_hdr->eth803_hdr) + + sizeof(rx_pkt_hdr->eth803_hdr) + + sizeof(rx_pkt_hdr->rfc1042_hdr) + - sizeof(rx_pkt_hdr->eth803_hdr.h_dest) + - sizeof(rx_pkt_hdr->eth803_hdr.h_source) + - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type)); + memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source, + sizeof(p_ethhdr->h_source)); + memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest, + sizeof(p_ethhdr->h_dest)); /* Chop off the rxpd + the excess memory from * 802.2/llc/snap header that was removed. 
*/ - hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd; - else + hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd; + } else { /* Chop off the rxpd */ hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; + } /* Chop off the leading header bytes so the it points * to the start of either the reconstructed EthII frame diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 1c70b8d09227..edf5b7a24900 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -350,7 +350,6 @@ static int mwifiex_usb_probe(struct usb_interface *intf, card->udev = udev; card->intf = intf; - usb_card = card; pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n", udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, @@ -525,25 +524,28 @@ static int mwifiex_usb_resume(struct usb_interface *intf) static void mwifiex_usb_disconnect(struct usb_interface *intf) { struct usb_card_rec *card = usb_get_intfdata(intf); - struct mwifiex_adapter *adapter; - if (!card || !card->adapter) { - pr_err("%s: card or card->adapter is NULL\n", __func__); + if (!card) { + pr_err("%s: card is NULL\n", __func__); return; } - adapter = card->adapter; - if (!adapter->priv_num) - return; - mwifiex_usb_free(card); - dev_dbg(adapter->dev, "%s: removing card\n", __func__); - mwifiex_remove_card(adapter, &add_remove_card_sem); + if (card->adapter) { + struct mwifiex_adapter *adapter = card->adapter; + + if (!adapter->priv_num) + return; + + dev_dbg(adapter->dev, "%s: removing card\n", __func__); + mwifiex_remove_card(adapter, &add_remove_card_sem); + } usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); kfree(card); + usb_card = NULL; return; } @@ -754,6 +756,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) card->adapter = adapter; adapter->dev = &card->udev->dev; strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME); + usb_card = card; return 0; } @@ -762,7 +765,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; - usb_set_intfdata(card->intf, NULL); + card->adapter = NULL; } static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, @@ -1004,7 +1007,7 @@ static void mwifiex_usb_cleanup_module(void) if (!down_interruptible(&add_remove_card_sem)) up(&add_remove_card_sem); - if (usb_card) { + if (usb_card && usb_card->adapter) { struct mwifiex_adapter *adapter = usb_card->adapter; int i; diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 5dd0ccc70b86..13eaeed03898 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -722,6 +722,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, tlv_hdr = (struct mwifiex_ie_types_data *) curr; tlv_len = le16_to_cpu(tlv_hdr->header.len); + if (resp_len < tlv_len + sizeof(tlv_hdr->header)) + break; + switch (le16_to_cpu(tlv_hdr->header.type)) { case TLV_TYPE_WMMQSTATUS: tlv_wmm_qstatus = diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 41a16d30c79c..e05d9b4c8317 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -811,6 +811,10 @@ static const struct net_device_ops islpci_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static struct device_type wlan_type = { + .name = "wlan", +}; + struct net_device * islpci_setup(struct pci_dev *pdev) { @@ -821,9 +825,8 @@ islpci_setup(struct pci_dev *pdev) 
return ndev; pci_set_drvdata(pdev, ndev); -#if defined(SET_NETDEV_DEV) SET_NETDEV_DEV(ndev, &pdev->dev); -#endif + SET_NETDEV_DEVTYPE(ndev, &wlan_type); /* setup the structure members */ ndev->base_addr = pci_resource_start(pdev, 0); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index c5738f14c4ba..776aff3678ff 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2640,7 +2640,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr); - if (info->default_power1 > POWER_BOUND) + if (info->default_power2 > POWER_BOUND) rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); else rt2x00_set_field8(&rfcsr, RFCSR50_TX, diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 080b1fcae5fa..9dd92a700442 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -181,6 +181,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work) static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { + struct ieee80211_tx_control control = {}; struct rt2x00_dev *rt2x00dev = data; struct sk_buff *skb; @@ -195,7 +196,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, */ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); while (skb) { - rt2x00mac_tx(rt2x00dev->hw, NULL, skb); + rt2x00mac_tx(rt2x00dev->hw, &control, skb); skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); } } diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a0935987fa3a..7f40ab8e1bd8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -146,7 +146,7 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length); * @local: frame is not from mac80211 */ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, - bool local); + struct ieee80211_sta *sta, bool local); /** * rt2x00queue_update_beacon - Send new beacon from mac80211 diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 7c157857f5ce..2183e7978399 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -90,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, frag_skb->data, data_length, tx_info, (struct ieee80211_rts *)(skb->data)); - retval = rt2x00queue_write_tx_frame(queue, skb, true); + retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); if (retval) { dev_kfree_skb_any(skb); rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); @@ -151,7 +151,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, goto exit_fail; } - if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) + if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) goto exit_fail; /* diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 50590b1420a5..a5d38e8ad9e4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -635,7 +635,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry) } int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, - bool local) + struct ieee80211_sta *sta, bool local) { struct ieee80211_tx_info *tx_info; struct queue_entry *entry; @@ -649,7 +649,7 @@ int 
rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, * after that we are free to use the skb->cb array * for our information. */ - rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL); + rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); /* * All information is retrieved from the skb->cb array, diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 9a78e3daf742..ff784072fb42 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -37,6 +37,7 @@ #include <linux/ip.h> #include <linux/module.h> +#include <linux/udp.h> /* *NOTICE!!!: This file will be very big, we should @@ -1074,64 +1075,52 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) if (!ieee80211_is_data(fc)) return false; + ip = (const struct iphdr *)(skb->data + mac_hdr_len + + SNAP_SIZE + PROTOC_TYPE_SIZE); + ether_type = be16_to_cpup((__be16 *) + (skb->data + mac_hdr_len + SNAP_SIZE)); - ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + - SNAP_SIZE + PROTOC_TYPE_SIZE); - ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); - /* ether_type = ntohs(ether_type); */ - - if (ETH_P_IP == ether_type) { - if (IPPROTO_UDP == ip->protocol) { - struct udphdr *udp = (struct udphdr *)((u8 *) ip + - (ip->ihl << 2)); - if (((((u8 *) udp)[1] == 68) && - (((u8 *) udp)[3] == 67)) || - ((((u8 *) udp)[1] == 67) && - (((u8 *) udp)[3] == 68))) { - /* - * 68 : UDP BOOTP client - * 67 : UDP BOOTP server - */ - RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), - DBG_DMESG, "dhcp %s !!\n", - is_tx ? "Tx" : "Rx"); - - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv-> - works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = - jiffies; - } + switch (ether_type) { + case ETH_P_IP: { + struct udphdr *udp; + u16 src; + u16 dst; - return true; - } - } - } else if (ETH_P_ARP == ether_type) { - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; - } + if (ip->protocol != IPPROTO_UDP) + return false; + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + src = be16_to_cpu(udp->source); + dst = be16_to_cpu(udp->dest); - return true; - } else if (ETH_P_PAE == ether_type) { + /* If this case involves port 68 (UDP BOOTP client) connecting + * with port 67 (UDP BOOTP server), then return true so that + * the lowest speed is used. + */ + if (!((src == 68 && dst == 67) || (src == 67 && dst == 68))) + return false; + + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, + "dhcp %s !!\n", is_tx ? "Tx" : "Rx"); + break; + } + case ETH_P_ARP: + break; + case ETH_P_PAE: RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); - - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; - } - - return true; - } else if (ETH_P_IPV6 == ether_type) { - /* IPv6 */ - return true; + break; + case ETH_P_IPV6: + /* TODO: Is this right? 
*/ + return false; + default: + return false; } - - return false; + if (is_tx) { + rtlpriv->enter_ps = false; + schedule_work(&rtlpriv->works.lps_change_work); + ppsc->last_delaylps_stamp_jiffies = jiffies; + } + return true; } EXPORT_SYMBOL_GPL(rtl_is_special_data); diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index ae13fb94b2e8..2ffc7298f686 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -262,9 +262,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) sizeof(u8), GFP_ATOMIC); if (!efuse_tbl) return; - efuse_word = kmalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); + efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC); if (!efuse_word) - goto done; + goto out; for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), GFP_ATOMIC); @@ -378,6 +378,7 @@ done: for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) kfree(efuse_word[i]); kfree(efuse_word); +out: kfree(efuse_tbl); } diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 393685390f3e..e26312fb4356 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -769,7 +769,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw, static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats *pstats, - struct rx_desc_92c *pdesc, + struct rx_desc_92c *p_desc, struct rx_fwinfo_92c *p_drvinfo, bool packet_match_bssid, bool packet_toself, @@ -784,11 +784,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, u32 rssi, total_rssi = 0; bool in_powersavemode = false; bool is_cck_rate; + u8 *pdesc = (u8 *)p_desc; - is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); + is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc); pstats->packet_matchbssid = packet_match_bssid; pstats->packet_toself = packet_toself; - pstats->is_cck = is_cck_rate; pstats->packet_beacon = packet_beacon; pstats->is_cck = is_cck_rate; pstats->RX_SIGQ[0] = -1; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 25e50ffc44ec..1bc21ccfa71b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -303,10 +303,10 @@ out: bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, - u8 *p_desc, struct sk_buff *skb) + u8 *pdesc, struct sk_buff *skb) { struct rx_fwinfo_92c *p_drvinfo; - struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; + struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc; u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc); stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); @@ -345,11 +345,11 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, if (phystatus) { p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + stats->rx_bufshift); - rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, + rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc, p_drvinfo); } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index 945ddecf90c9..0eb0f4ae5920 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -525,7 +525,7 @@ bool 
rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, p_drvinfo); } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c index 5061f1db3f02..92d38ab3c60e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c @@ -265,7 +265,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw, rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]) { pwrdiff_limit[i] = - rtlefuse->pwrgroup_ht20 + rtlefuse->pwrgroup_ht40 [RF90_PATH_A][chnl - 1]; } } else { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 222d2e792ca6..27efbcdac6a9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -329,7 +329,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, } /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->rssi + 10; + rx_status->signal = stats->recvsignalpower + 10; return true; } diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index d224dc3bb092..0c65386fa30d 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -77,11 +77,7 @@ #define RTL_SLOT_TIME_9 9 #define RTL_SLOT_TIME_20 20 -/*related with tcp/ip. */ -/*if_ehther.h*/ -#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */ -#define ETH_P_IP 0x0800 /*Internet Protocol packet */ -#define ETH_P_ARP 0x0806 /*Address Resolution packet */ +/*related to tcp/ip. */ #define SNAP_SIZE 6 #define PROTOC_TYPE_SIZE 2 diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b78ee10a956a..870f1fa58370 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -368,11 +368,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, unsigned long rx_ring_ref, unsigned int tx_evtchn, unsigned int rx_evtchn) { + struct task_struct *task; int err = -ENOMEM; - /* Already connected through? 
*/ - if (vif->tx_irq) - return 0; + BUG_ON(vif->tx_irq); + BUG_ON(vif->task); err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); if (err < 0) @@ -411,14 +411,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, } init_waitqueue_head(&vif->wq); - vif->task = kthread_create(xenvif_kthread, - (void *)vif, "%s", vif->dev->name); - if (IS_ERR(vif->task)) { + task = kthread_create(xenvif_kthread, + (void *)vif, "%s", vif->dev->name); + if (IS_ERR(task)) { pr_warn("Could not allocate kthread for %s\n", vif->dev->name); - err = PTR_ERR(vif->task); + err = PTR_ERR(task); goto err_rx_unbind; } + vif->task = task; + rtnl_lock(); if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) dev_set_mtu(vif->dev, ETH_DATA_LEN); @@ -461,6 +463,11 @@ void xenvif_disconnect(struct xenvif *vif) if (netif_carrier_ok(vif->dev)) xenvif_carrier_off(vif); + if (vif->task) { + kthread_stop(vif->task); + vif->task = NULL; + } + if (vif->tx_irq) { if (vif->tx_irq == vif->rx_irq) unbind_from_irqhandler(vif->tx_irq, vif); @@ -471,9 +478,6 @@ void xenvif_disconnect(struct xenvif *vif) vif->tx_irq = 0; } - if (vif->task) - kthread_stop(vif->task); - xenvif_unmap_frontend_rings(vif); } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 919b6509455c..e884ee1fe7ed 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -39,6 +39,7 @@ #include <linux/udp.h> #include <net/tcp.h> +#include <net/ip6_checksum.h> #include <xen/xen.h> #include <xen/events.h> @@ -451,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, } /* Set up a GSO prefix descriptor, if necessary */ - if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) { + if ((1 << gso_type) & vif->gso_prefix_mask) { req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; @@ -1148,75 +1149,92 @@ static int xenvif_set_skb_gso(struct xenvif *vif, return 0; } -static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len) +static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len, + unsigned int max) { - if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) { - /* If we need to pullup then pullup to the max, so we - * won't need to do it again. - */ - int target = min_t(int, skb->len, MAX_TCP_HEADER); - __pskb_pull_tail(skb, target - skb_headlen(skb)); - } + if (skb_headlen(skb) >= len) + return 0; + + /* If we need to pullup then pullup to the max, so we + * won't need to do it again. + */ + if (max > skb->len) + max = skb->len; + + if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) + return -ENOMEM; + + if (skb_headlen(skb) < len) + return -EPROTO; + + return 0; } +/* This value should be large enough to cover a tagged ethernet header plus + * maximally sized IP and TCP or UDP headers. 
+ */ +#define MAX_IP_HDR_LEN 128 + static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb, int recalculate_partial_csum) { - struct iphdr *iph = (void *)skb->data; - unsigned int header_size; unsigned int off; - int err = -EPROTO; + bool fragment; + int err; - off = sizeof(struct iphdr); + fragment = false; - header_size = skb->network_header + off + MAX_IPOPTLEN; - maybe_pull_tail(skb, header_size); + err = maybe_pull_tail(skb, + sizeof(struct iphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; - off = iph->ihl * 4; + if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) + fragment = true; - switch (iph->protocol) { + off = ip_hdrlen(skb); + + err = -EPROTO; + + switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: + err = maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) goto out; - if (recalculate_partial_csum) { - struct tcphdr *tcph = tcp_hdr(skb); - - header_size = skb->network_header + - off + - sizeof(struct tcphdr); - maybe_pull_tail(skb, header_size); - - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - off, - IPPROTO_TCP, 0); - } + if (recalculate_partial_csum) + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); break; case IPPROTO_UDP: + err = maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IP_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) goto out; - if (recalculate_partial_csum) { - struct udphdr *udph = udp_hdr(skb); - - header_size = skb->network_header + - off + - sizeof(struct udphdr); - maybe_pull_tail(skb, header_size); - - udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - off, - IPPROTO_UDP, 0); - } + if (recalculate_partial_csum) + udp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); break; default: - if (net_ratelimit()) - netdev_err(vif->dev, - "Attempting to checksum a non-TCP/UDP packet, " - "dropping a protocol %d packet\n", - iph->protocol); goto out; } @@ -1226,121 +1244,138 @@ out: return err; } +/* This value should be large enough to cover a tagged ethernet header plus + * an IPv6 header, all options, and a maximal TCP or UDP header. 
+ */ +#define MAX_IPV6_HDR_LEN 256 + +#define OPT_HDR(type, skb, off) \ + (type *)(skb_network_header(skb) + (off)) + static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb, int recalculate_partial_csum) { - int err = -EPROTO; - struct ipv6hdr *ipv6h = (void *)skb->data; + int err; u8 nexthdr; - unsigned int header_size; unsigned int off; + unsigned int len; bool fragment; bool done; + fragment = false; done = false; off = sizeof(struct ipv6hdr); - header_size = skb->network_header + off; - maybe_pull_tail(skb, header_size); + err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; - nexthdr = ipv6h->nexthdr; + nexthdr = ipv6_hdr(skb)->nexthdr; - while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) && - !done) { + len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); + while (off <= len && !done) { switch (nexthdr) { case IPPROTO_DSTOPTS: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: { - struct ipv6_opt_hdr *hp = (void *)(skb->data + off); + struct ipv6_opt_hdr *hp; - header_size = skb->network_header + - off + - sizeof(struct ipv6_opt_hdr); - maybe_pull_tail(skb, header_size); + err = maybe_pull_tail(skb, + off + + sizeof(struct ipv6_opt_hdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); nexthdr = hp->nexthdr; off += ipv6_optlen(hp); break; } case IPPROTO_AH: { - struct ip_auth_hdr *hp = (void *)(skb->data + off); + struct ip_auth_hdr *hp; - header_size = skb->network_header + - off + - sizeof(struct ip_auth_hdr); - maybe_pull_tail(skb, header_size); + err = maybe_pull_tail(skb, + off + + sizeof(struct ip_auth_hdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + hp = OPT_HDR(struct ip_auth_hdr, skb, off); nexthdr = hp->nexthdr; - off += (hp->hdrlen+2)<<2; + off += ipv6_authlen(hp); + break; + } + case IPPROTO_FRAGMENT: { + struct frag_hdr *hp; + + err = maybe_pull_tail(skb, + off + + sizeof(struct frag_hdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + + hp = OPT_HDR(struct frag_hdr, skb, off); + + if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) + fragment = true; + + nexthdr = hp->nexthdr; + off += sizeof(struct frag_hdr); break; } - case IPPROTO_FRAGMENT: - fragment = true; - /* fall through */ default: done = true; break; } } - if (!done) { - if (net_ratelimit()) - netdev_err(vif->dev, "Failed to parse packet header\n"); - goto out; - } + err = -EPROTO; - if (fragment) { - if (net_ratelimit()) - netdev_err(vif->dev, "Packet is a fragment!\n"); + if (!done || fragment) goto out; - } switch (nexthdr) { case IPPROTO_TCP: + err = maybe_pull_tail(skb, + off + sizeof(struct tcphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) goto out; - if (recalculate_partial_csum) { - struct tcphdr *tcph = tcp_hdr(skb); - - header_size = skb->network_header + - off + - sizeof(struct tcphdr); - maybe_pull_tail(skb, header_size); - - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, - &ipv6h->daddr, - skb->len - off, - IPPROTO_TCP, 0); - } + if (recalculate_partial_csum) + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_TCP, 0); break; case IPPROTO_UDP: + err = maybe_pull_tail(skb, + off + sizeof(struct udphdr), + MAX_IPV6_HDR_LEN); + if (err < 0) + goto out; + if (!skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) goto out; - if (recalculate_partial_csum) { - struct udphdr *udph = udp_hdr(skb); - - header_size = skb->network_header + - 
off + - sizeof(struct udphdr); - maybe_pull_tail(skb, header_size); - - udph->check = ~csum_ipv6_magic(&ipv6h->saddr, - &ipv6h->daddr, - skb->len - off, - IPPROTO_UDP, 0); - } + if (recalculate_partial_csum) + udp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - off, + IPPROTO_UDP, 0); break; default: - if (net_ratelimit()) - netdev_err(vif->dev, - "Attempting to checksum a non-TCP/UDP packet, " - "dropping a protocol %d packet\n", - nexthdr); goto out; } @@ -1410,14 +1445,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) return false; } -static unsigned xenvif_tx_build_gops(struct xenvif *vif) +static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) { struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS)) { + < MAX_PENDING_REQS) && + (skb_queue_len(&vif->tx_queue) < budget)) { struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; struct page *page; @@ -1439,7 +1475,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif) continue; } - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); + work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); if (!work_to_do) break; @@ -1579,14 +1615,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif) } -static int xenvif_tx_submit(struct xenvif *vif, int budget) +static int xenvif_tx_submit(struct xenvif *vif) { struct gnttab_copy *gop = vif->tx_copy_ops; struct sk_buff *skb; int work_done = 0; - while (work_done < budget && - (skb = __skb_dequeue(&vif->tx_queue)) != NULL) { + while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; u16 pending_idx; unsigned data_len; @@ -1661,14 +1696,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget) if (unlikely(!tx_work_todo(vif))) return 0; - nr_gops = xenvif_tx_build_gops(vif); + nr_gops = xenvif_tx_build_gops(vif, budget); if (nr_gops == 0) return 0; gnttab_batch_copy(vif->tx_copy_ops, nr_gops); - work_done = xenvif_tx_submit(vif, nr_gops); + work_done = xenvif_tx_submit(vif); return work_done; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d85e66979711..e59acb1daa23 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -277,12 +277,13 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) if (!page) { kfree_skb(skb); no_skb: - /* Any skbuffs queued for refill? Force them out. */ - if (i != 0) - goto refill; /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); + + /* Any skbuffs queued for refill? Force them out. 
*/ + if (i != 0) + goto refill; break; } diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index 1cb6e51e6bda..170e8e60cdb7 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -141,6 +141,24 @@ void ntb_unregister_event_callback(struct ntb_device *ndev) ndev->event_cb = NULL; } +static void ntb_irq_work(unsigned long data) +{ + struct ntb_db_cb *db_cb = (struct ntb_db_cb *)data; + int rc; + + rc = db_cb->callback(db_cb->data, db_cb->db_num); + if (rc) + tasklet_schedule(&db_cb->irq_work); + else { + struct ntb_device *ndev = db_cb->ndev; + unsigned long mask; + + mask = readw(ndev->reg_ofs.ldb_mask); + clear_bit(db_cb->db_num * ndev->bits_per_vector, &mask); + writew(mask, ndev->reg_ofs.ldb_mask); + } +} + /** * ntb_register_db_callback() - register a callback for doorbell interrupt * @ndev: pointer to ntb_device instance @@ -155,7 +173,7 @@ void ntb_unregister_event_callback(struct ntb_device *ndev) * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, - void *data, void (*func)(void *data, int db_num)) + void *data, int (*func)(void *data, int db_num)) { unsigned long mask; @@ -166,6 +184,10 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, ndev->db_cb[idx].callback = func; ndev->db_cb[idx].data = data; + ndev->db_cb[idx].ndev = ndev; + + tasklet_init(&ndev->db_cb[idx].irq_work, ntb_irq_work, + (unsigned long) &ndev->db_cb[idx]); /* unmask interrupt */ mask = readw(ndev->reg_ofs.ldb_mask); @@ -194,6 +216,8 @@ void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx) set_bit(idx * ndev->bits_per_vector, &mask); writew(mask, ndev->reg_ofs.ldb_mask); + tasklet_disable(&ndev->db_cb[idx].irq_work); + ndev->db_cb[idx].callback = NULL; } @@ -678,6 +702,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev) return -EINVAL; ndev->limits.max_mw = SNB_ERRATA_MAX_MW; + ndev->limits.max_db_bits = SNB_MAX_DB_BITS; ndev->reg_ofs.spad_write = ndev->mw[1].vbase + SNB_SPAD_OFFSET; ndev->reg_ofs.rdb = ndev->mw[1].vbase + @@ -688,8 +713,21 @@ static int ntb_xeon_setup(struct ntb_device *ndev) */ writeq(ndev->mw[1].bar_sz + 0x1000, ndev->reg_base + SNB_PBAR4LMT_OFFSET); + /* HW errata on the Limit registers. They can only be + * written when the base register is 4GB aligned and + * < 32bit. This should already be the case based on the + * driver defaults, but write the Limit registers first + * just in case. + */ } else { ndev->limits.max_mw = SNB_MAX_MW; + + /* HW Errata on bit 14 of b2bdoorbell register. Writes + * will not be mirrored to the remote system. Shrink + * the number of bits by one, since bit 14 is the last + * bit. + */ + ndev->limits.max_db_bits = SNB_MAX_DB_BITS - 1; ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET; ndev->reg_ofs.rdb = ndev->reg_base + @@ -699,6 +737,12 @@ static int ntb_xeon_setup(struct ntb_device *ndev) * something silly */ writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET); + /* HW errata on the Limit registers. They can only be + * written when the base register is 4GB aligned and + * < 32bit. This should already be the case based on the + * driver defaults, but write the Limit registers first + * just in case. + */ } /* The Xeon errata workaround requires setting SBAR Base @@ -769,6 +813,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev) * have an equal amount. 
*/ ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2; + ndev->limits.max_db_bits = SNB_MAX_DB_BITS; /* Note: The SDOORBELL is the cause of the errata. You REALLY * don't want to touch it. */ @@ -793,6 +838,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev) * have an equal amount. */ ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS / 2; + ndev->limits.max_db_bits = SNB_MAX_DB_BITS; ndev->reg_ofs.rdb = ndev->reg_base + SNB_PDOORBELL_OFFSET; ndev->reg_ofs.ldb = ndev->reg_base + SNB_SDOORBELL_OFFSET; ndev->reg_ofs.ldb_mask = ndev->reg_base + SNB_SDBMSK_OFFSET; @@ -819,7 +865,6 @@ static int ntb_xeon_setup(struct ntb_device *ndev) ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_SLINK_STATUS_OFFSET; ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET; - ndev->limits.max_db_bits = SNB_MAX_DB_BITS; ndev->limits.msix_cnt = SNB_MSIX_CNT; ndev->bits_per_vector = SNB_DB_BITS_PER_VEC; @@ -934,12 +979,16 @@ static irqreturn_t bwd_callback_msix_irq(int irq, void *data) { struct ntb_db_cb *db_cb = data; struct ntb_device *ndev = db_cb->ndev; + unsigned long mask; dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, db_cb->db_num); - if (db_cb->callback) - db_cb->callback(db_cb->data, db_cb->db_num); + mask = readw(ndev->reg_ofs.ldb_mask); + set_bit(db_cb->db_num * ndev->bits_per_vector, &mask); + writew(mask, ndev->reg_ofs.ldb_mask); + + tasklet_schedule(&db_cb->irq_work); /* No need to check for the specific HB irq, any interrupt means * we're connected. @@ -955,12 +1004,16 @@ static irqreturn_t xeon_callback_msix_irq(int irq, void *data) { struct ntb_db_cb *db_cb = data; struct ntb_device *ndev = db_cb->ndev; + unsigned long mask; dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq, db_cb->db_num); - if (db_cb->callback) - db_cb->callback(db_cb->data, db_cb->db_num); + mask = readw(ndev->reg_ofs.ldb_mask); + set_bit(db_cb->db_num * ndev->bits_per_vector, &mask); + writew(mask, ndev->reg_ofs.ldb_mask); + + tasklet_schedule(&db_cb->irq_work); /* On Sandybridge, there are 16 bits in the interrupt register * but only 4 vectors. So, 5 bits are assigned to the first 3 @@ -986,7 +1039,7 @@ static irqreturn_t xeon_event_msix_irq(int irq, void *dev) dev_err(&ndev->pdev->dev, "Error determining link status\n"); /* bit 15 is always the link bit */ - writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.ldb); + writew(1 << SNB_LINK_DB, ndev->reg_ofs.ldb); return IRQ_HANDLED; } @@ -1075,6 +1128,10 @@ static int ntb_setup_msix(struct ntb_device *ndev) "Only %d MSI-X vectors. 
Limiting the number of queues to that number.\n", rc); msix_entries = rc; + + rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries); + if (rc) + goto err1; } for (i = 0; i < msix_entries; i++) { @@ -1176,9 +1233,10 @@ static int ntb_setup_interrupts(struct ntb_device *ndev) */ if (ndev->hw_type == BWD_HW) writeq(~0, ndev->reg_ofs.ldb_mask); - else - writew(~(1 << ndev->limits.max_db_bits), - ndev->reg_ofs.ldb_mask); + else { + u16 var = 1 << SNB_LINK_DB; + writew(~var, ndev->reg_ofs.ldb_mask); + } rc = ntb_setup_msix(ndev); if (!rc) @@ -1286,6 +1344,39 @@ static void ntb_free_debugfs(struct ntb_device *ndev) } } +static void ntb_hw_link_up(struct ntb_device *ndev) +{ + if (ndev->conn_type == NTB_CONN_TRANSPARENT) + ntb_link_event(ndev, NTB_LINK_UP); + else { + u32 ntb_cntl; + + /* Let's bring the NTB link up */ + ntb_cntl = readl(ndev->reg_ofs.lnk_cntl); + ntb_cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK); + ntb_cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP; + ntb_cntl |= NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP; + writel(ntb_cntl, ndev->reg_ofs.lnk_cntl); + } +} + +static void ntb_hw_link_down(struct ntb_device *ndev) +{ + u32 ntb_cntl; + + if (ndev->conn_type == NTB_CONN_TRANSPARENT) { + ntb_link_event(ndev, NTB_LINK_DOWN); + return; + } + + /* Bring NTB link down */ + ntb_cntl = readl(ndev->reg_ofs.lnk_cntl); + ntb_cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP); + ntb_cntl &= ~(NTB_CNTL_P2S_BAR45_SNOOP | NTB_CNTL_S2P_BAR45_SNOOP); + ntb_cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK; + writel(ntb_cntl, ndev->reg_ofs.lnk_cntl); +} + static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ntb_device *ndev; @@ -1374,9 +1465,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err6; - /* Let's bring the NTB link up */ - writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP, - ndev->reg_ofs.lnk_cntl); + ntb_hw_link_up(ndev); return 0; @@ -1406,12 +1495,8 @@ static void ntb_pci_remove(struct pci_dev *pdev) { struct ntb_device *ndev = pci_get_drvdata(pdev); int i; - u32 ntb_cntl; - /* Bring NTB link down */ - ntb_cntl = readl(ndev->reg_ofs.lnk_cntl); - ntb_cntl |= NTB_CNTL_LINK_DISABLE; - writel(ntb_cntl, ndev->reg_ofs.lnk_cntl); + ntb_hw_link_down(ndev); ntb_transport_free(ndev->ntb_transport); diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h index 0a31cedae7d4..bbdb7edca10c 100644 --- a/drivers/ntb/ntb_hw.h +++ b/drivers/ntb/ntb_hw.h @@ -106,10 +106,11 @@ struct ntb_mw { }; struct ntb_db_cb { - void (*callback) (void *data, int db_num); + int (*callback)(void *data, int db_num); unsigned int db_num; void *data; struct ntb_device *ndev; + struct tasklet_struct irq_work; }; struct ntb_device { @@ -228,8 +229,8 @@ struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void ntb_unregister_transport(struct ntb_device *ndev); void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr); int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx, - void *data, void (*db_cb_func) (void *data, - int db_num)); + void *data, int (*db_cb_func)(void *data, + int db_num)); void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx); int ntb_register_event_callback(struct ntb_device *ndev, void (*event_cb_func) (void *handle, diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h index aa4bdd393c58..9774506419d7 100644 --- a/drivers/ntb/ntb_regs.h +++ b/drivers/ntb/ntb_regs.h @@ -55,6 +55,7 @@ #define 
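The ntb_setup_msix() hunk above adds a second pci_enable_msix() call after the first one reports a reduced vector count. Under the pci_enable_msix() semantics of this era, a positive return value means only that many vectors are available and MSI-X was not actually enabled, so the request must be retried with the smaller count. A sketch of that idiom, with request_msix_vectors(), entries and want as illustrative names:

#include <linux/pci.h>

static int request_msix_vectors(struct pci_dev *pdev,
				struct msix_entry *entries, int want)
{
	int rc;

	rc = pci_enable_msix(pdev, entries, want);
	if (rc < 0)
		return rc;			/* hard failure */
	if (rc > 0) {
		/* only rc vectors available: shrink the request and retry */
		want = rc;
		rc = pci_enable_msix(pdev, entries, want);
		if (rc)
			return rc < 0 ? rc : -ENOSPC;
	}

	return want;				/* number of vectors enabled */
}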
SNB_MAX_COMPAT_SPADS 16 /* Reserve the uppermost bit for link interrupt */ #define SNB_MAX_DB_BITS 15 +#define SNB_LINK_DB 15 #define SNB_DB_BITS_PER_VEC 5 #define SNB_MAX_MW 2 #define SNB_ERRATA_MAX_MW 1 @@ -75,9 +76,6 @@ #define SNB_SBAR2XLAT_OFFSET 0x0030 #define SNB_SBAR4XLAT_OFFSET 0x0038 #define SNB_SBAR0BASE_OFFSET 0x0040 -#define SNB_SBAR0BASE_OFFSET 0x0040 -#define SNB_SBAR2BASE_OFFSET 0x0048 -#define SNB_SBAR4BASE_OFFSET 0x0050 #define SNB_SBAR2BASE_OFFSET 0x0048 #define SNB_SBAR4BASE_OFFSET 0x0050 #define SNB_NTBCNTL_OFFSET 0x0058 @@ -145,11 +143,13 @@ #define BWD_LTSSMSTATEJMP_FORCEDETECT (1 << 2) #define BWD_IBIST_ERR_OFLOW 0x7FFF7FFF -#define NTB_CNTL_CFG_LOCK (1 << 0) -#define NTB_CNTL_LINK_DISABLE (1 << 1) -#define NTB_CNTL_BAR23_SNOOP (1 << 2) -#define NTB_CNTL_BAR45_SNOOP (1 << 6) -#define BWD_CNTL_LINK_DOWN (1 << 16) +#define NTB_CNTL_CFG_LOCK (1 << 0) +#define NTB_CNTL_LINK_DISABLE (1 << 1) +#define NTB_CNTL_S2P_BAR23_SNOOP (1 << 2) +#define NTB_CNTL_P2S_BAR23_SNOOP (1 << 4) +#define NTB_CNTL_S2P_BAR45_SNOOP (1 << 6) +#define NTB_CNTL_P2S_BAR45_SNOOP (1 << 8) +#define BWD_CNTL_LINK_DOWN (1 << 16) #define NTB_PPD_OFFSET 0x00D4 #define SNB_PPD_CONN_TYPE 0x0003 diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 12a9e83c008b..3217f394d45b 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -119,7 +119,6 @@ struct ntb_transport_qp { void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, void *data, int len); - struct tasklet_struct rx_work; struct list_head rx_pend_q; struct list_head rx_free_q; spinlock_t ntb_rx_pend_q_lock; @@ -584,11 +583,8 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) return 0; } -static void ntb_qp_link_cleanup(struct work_struct *work) +static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) { - struct ntb_transport_qp *qp = container_of(work, - struct ntb_transport_qp, - link_cleanup); struct ntb_transport *nt = qp->transport; struct pci_dev *pdev = ntb_query_pdev(nt->ndev); @@ -602,6 +598,16 @@ static void ntb_qp_link_cleanup(struct work_struct *work) dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); qp->qp_link = NTB_LINK_DOWN; +} + +static void ntb_qp_link_cleanup_work(struct work_struct *work) +{ + struct ntb_transport_qp *qp = container_of(work, + struct ntb_transport_qp, + link_cleanup); + struct ntb_transport *nt = qp->transport; + + ntb_qp_link_cleanup(qp); if (nt->transport_link == NTB_LINK_UP) schedule_delayed_work(&qp->link_work, @@ -613,22 +619,20 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp) schedule_work(&qp->link_cleanup); } -static void ntb_transport_link_cleanup(struct work_struct *work) +static void ntb_transport_link_cleanup(struct ntb_transport *nt) { - struct ntb_transport *nt = container_of(work, struct ntb_transport, - link_cleanup); int i; + /* Pass along the info to any clients */ + for (i = 0; i < nt->max_qps; i++) + if (!test_bit(i, &nt->qp_bitmap)) + ntb_qp_link_cleanup(&nt->qps[i]); + if (nt->transport_link == NTB_LINK_DOWN) cancel_delayed_work_sync(&nt->link_work); else nt->transport_link = NTB_LINK_DOWN; - /* Pass along the info to any clients */ - for (i = 0; i < nt->max_qps; i++) - if (!test_bit(i, &nt->qp_bitmap)) - ntb_qp_link_down(&nt->qps[i]); - /* The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed @@ -637,6 +641,14 @@ static void ntb_transport_link_cleanup(struct work_struct *work) 
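The ntb_transport.c link-cleanup hunks in this area follow one refactoring pattern: the cleanup body is split out of the workqueue callback so it can also be called synchronously (for example from the free path), leaving container_of() only in a thin _work wrapper. A minimal sketch of that split, with struct foo and its members as illustrative placeholders:

#include <linux/workqueue.h>

struct foo {
	struct work_struct cleanup;
	/* ... state torn down by foo_cleanup() ... */
};

static void foo_cleanup(struct foo *f)
{
	/* actual teardown, callable directly from any process context */
}

static void foo_cleanup_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, cleanup);

	foo_cleanup(f);
}

static void foo_init(struct foo *f)
{
	INIT_WORK(&f->cleanup, foo_cleanup_work);
}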
ntb_write_local_spad(nt->ndev, i, 0); } +static void ntb_transport_link_cleanup_work(struct work_struct *work) +{ + struct ntb_transport *nt = container_of(work, struct ntb_transport, + link_cleanup); + + ntb_transport_link_cleanup(nt); +} + static void ntb_transport_event_callback(void *data, enum ntb_hw_event event) { struct ntb_transport *nt = data; @@ -880,7 +892,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt, } INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); - INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup); + INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); spin_lock_init(&qp->ntb_rx_pend_q_lock); spin_lock_init(&qp->ntb_rx_free_q_lock); @@ -936,7 +948,7 @@ int ntb_transport_init(struct pci_dev *pdev) } INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); - INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup); + INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work); rc = ntb_register_event_callback(nt->ndev, ntb_transport_event_callback); @@ -972,7 +984,7 @@ void ntb_transport_free(void *transport) struct ntb_device *ndev = nt->ndev; int i; - nt->transport_link = NTB_LINK_DOWN; + ntb_transport_link_cleanup(nt); /* verify that all the qp's are freed */ for (i = 0; i < nt->max_qps; i++) { @@ -1034,10 +1046,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t pay_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - unsigned long flags; entry->len = len; @@ -1045,35 +1056,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, goto err; if (len < copy_bytes) - goto err1; + goto err_wait; device = chan->device; pay_off = (size_t) offset & ~PAGE_MASK; buff_off = (size_t) buf & ~PAGE_MASK; if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) - goto err1; + goto err_wait; - dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); - if (dma_mapping_error(device->dev, dest)) - goto err1; + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); + if (!unmap) + goto err_wait; - src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) - goto err2; + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), + pay_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; + + unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_FROM_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[1])) + goto err_get_unmap; + + unmap->from_cnt = 1; - flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) - goto err3; + goto err_get_unmap; txd->callback = ntb_rx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err3; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); qp->last_cookie = cookie; @@ -1081,11 +1106,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, return; -err3: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); -err2: - dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); -err1: +err_set_unmap: + dmaengine_unmap_put(unmap); 
+err_get_unmap: + dmaengine_unmap_put(unmap); +err_wait: /* If the callbacks come out of order, the writing of the index to the * last completed will be out of order. This may result in the * receive stalling forever. @@ -1175,11 +1200,14 @@ err: goto out; } -static void ntb_transport_rx(unsigned long data) +static int ntb_transport_rxc_db(void *data, int db_num) { - struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; + struct ntb_transport_qp *qp = data; int rc, i; + dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n", + __func__, db_num); + /* Limit the number of packets processed in a single interrupt to * provide fairness to others */ @@ -1191,16 +1219,8 @@ static void ntb_transport_rx(unsigned long data) if (qp->dma_chan) dma_async_issue_pending(qp->dma_chan); -} - -static void ntb_transport_rxc_db(void *data, int db_num) -{ - struct ntb_transport_qp *qp = data; - - dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n", - __func__, db_num); - tasklet_schedule(&qp->rx_work); + return i; } static void ntb_tx_copy_callback(void *data) @@ -1245,12 +1265,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t dest_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; + dma_addr_t dest; dma_cookie_t cookie; void __iomem *offset; size_t len = entry->len; void *buf = entry->buf; - unsigned long flags; offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); @@ -1273,28 +1293,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) goto err; - src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) + unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); + if (!unmap) goto err; - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; + + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) - goto err1; + goto err_get_unmap; txd->callback = ntb_tx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err1; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); dma_async_issue_pending(chan); qp->tx_async++; return; -err1: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); err: ntb_memcpy_tx(entry, offset); qp->tx_memcpy++; @@ -1406,11 +1439,12 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev, qp->tx_handler = handlers->tx_handler; qp->event_handler = handlers->event_handler; + dmaengine_get(); qp->dma_chan = dma_find_channel(DMA_MEMCPY); - if (!qp->dma_chan) + if (!qp->dma_chan) { + dmaengine_put(); dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n"); - else - dmaengine_get(); + } for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); @@ -1432,25 +1466,23 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev, &qp->tx_free_q); } - tasklet_init(&qp->rx_work, 
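The ntb_async_rx()/ntb_async_tx() hunks above migrate from dma_map_single() plus the DMA_COMPL_*_UNMAP_SINGLE flags to the dmaengine unmap-data API: the mappings are carried in a dmaengine_unmap_data object whose lifetime is tied to the descriptor via dma_set_unmap(), and the submitter drops its own reference with dmaengine_unmap_put(). The sketch below shows the general shape for a two-buffer memcpy offload; do_memcpy_offload() and its arguments are illustrative, not driver API.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int do_memcpy_offload(struct dma_chan *chan, void *dst, void *src,
			     size_t len)
{
	struct dma_device *dev = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, virt_to_page(src),
				      offset_in_page(src), len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, unmap->addr[0]))
		goto err_get_unmap;
	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(dev->dev, virt_to_page(dst),
				      offset_in_page(dst), len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, unmap->addr[1]))
		goto err_get_unmap;
	unmap->from_cnt = 1;

	txd = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					  len, DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	dma_set_unmap(txd, unmap);		/* descriptor takes its own reference */
	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);		/* drop the submitter's reference */
	dma_async_issue_pending(chan);
	return 0;

err_set_unmap:
	dmaengine_unmap_put(unmap);		/* release the descriptor's reference */
err_get_unmap:
	dmaengine_unmap_put(unmap);		/* release the submitter's reference */
	return -EIO;
}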
ntb_transport_rx, (unsigned long) qp); - rc = ntb_register_db_callback(qp->ndev, free_queue, qp, ntb_transport_rxc_db); if (rc) - goto err3; + goto err2; dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); return qp; -err3: - tasklet_disable(&qp->rx_work); err2: while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); err1: while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) kfree(entry); + if (qp->dma_chan) + dmaengine_put(); set_bit(free_queue, &nt->qp_bitmap); err: return NULL; @@ -1489,7 +1521,6 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) } ntb_unregister_db_callback(qp->ndev, qp->qp_num); - tasklet_disable(&qp->rx_work); cancel_delayed_work_sync(&qp->link_work); diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 95655d7c0d0b..e52d7ffa38b9 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -410,7 +410,7 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid); * Otherwise is returns a bitmask with supported features. Current * features reported are: * PCI_PASID_CAP_EXEC - Execute permission supported - * PCI_PASID_CAP_PRIV - Priviledged mode supported + * PCI_PASID_CAP_PRIV - Privileged mode supported */ int pci_pasid_features(struct pci_dev *pdev) { diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index c269e430c760..2aa7b77c7c88 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, *value = 0; break; + case PCI_INTERRUPT_LINE: + /* LINE PIN MIN_GNT MAX_LAT */ + *value = 0; + break; + default: *value = 0xffffffff; return PCIBIOS_BAD_REGISTER_NUMBER; diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 7c4f38dd42ba..0afbbbc55c81 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -249,7 +249,7 @@ struct tegra_pcie { void __iomem *afi; int irq; - struct list_head busses; + struct list_head buses; struct resource *cs; struct resource io; @@ -399,14 +399,14 @@ free: /* * Look up a virtual address mapping for the specified bus number. If no such - * mapping existis, try to create one. + * mapping exists, try to create one. 
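The ntb_transport_create_queue() hunk above also reorders the dmaengine client reference: dmaengine_get() is now taken before dma_find_channel(DMA_MEMCPY), so the memcpy channel table is populated when it is consulted, and the reference is dropped again whenever no channel ends up being used. A small sketch of that ordering, with get_memcpy_chan() as an illustrative helper rather than driver API:

#include <linux/dmaengine.h>

static struct dma_chan *get_memcpy_chan(void)
{
	struct dma_chan *chan;

	dmaengine_get();		/* register as a client; fills the channel table */
	chan = dma_find_channel(DMA_MEMCPY);
	if (!chan)
		dmaengine_put();	/* falling back to CPU copies: drop the reference */

	return chan;			/* caller must dmaengine_put() when done with it */
}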
*/ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, unsigned int busnr) { struct tegra_pcie_bus *bus; - list_for_each_entry(bus, &pcie->busses, list) + list_for_each_entry(bus, &pcie->buses, list) if (bus->nr == busnr) return (void __iomem *)bus->area->addr; @@ -414,7 +414,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, if (IS_ERR(bus)) return NULL; - list_add_tail(&bus->list, &pcie->busses); + list_add_tail(&bus->list, &pcie->buses); return (void __iomem *)bus->area->addr; } @@ -808,7 +808,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; afi_writel(pcie, value, AFI_FUSE); - /* initialze internal PHY, enable up to 16 PCIE lanes */ + /* initialize internal PHY, enable up to 16 PCIE lanes */ pads_writel(pcie, 0x0, PADS_CTL_SEL); /* override IDDQ to 1 on all 4 lanes */ @@ -1624,7 +1624,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) if (!pcie) return -ENOMEM; - INIT_LIST_HEAD(&pcie->busses); + INIT_LIST_HEAD(&pcie->buses); INIT_LIST_HEAD(&pcie->ports); pcie->soc_data = match->data; pcie->dev = &pdev->dev; diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 1e1fea4d959b..e33b68be0391 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -197,7 +197,7 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0) return -ENOSPC; /* * Check if this position is at correct offset.nvec is always a - * power of two. pos0 must be nvec bit alligned. + * power of two. pos0 must be nvec bit aligned. */ if (pos % msgvec) pos += msgvec - (pos % msgvec); diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 0a648af89531..df8caec59789 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -133,8 +133,8 @@ config HOTPLUG_PCI_RPA_DLPAR To compile this driver as a module, choose M here: the module will be called rpadlpar_io. - - When in doubt, say N. + + When in doubt, say N. 
config HOTPLUG_PCI_SGI tristate "SGI PCI Hotplug Support" diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 47ec8c80e16d..3e6532b945c1 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -31,7 +31,7 @@ pci_hotplug-objs += cpci_hotplug_core.o \ cpci_hotplug_pci.o endif ifdef CONFIG_ACPI -pci_hotplug-objs += acpi_pcihp.o +pci_hotplug-objs += acpi_pcihp.o endif cpqphp-objs := cpqphp_core.o \ diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 1ce8ee054f1a..a94d850ae228 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -367,7 +367,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; } - handle = DEVICE_ACPI_HANDLE(&pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!handle) { /* * This hotplug controller was not listed in the ACPI name diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 26100f510b10..1592dbe4f904 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -176,7 +176,6 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot); u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot); /* variables */ -extern bool acpiphp_debug; extern bool acpiphp_disabled; #endif /* _ACPIPHP_H */ diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 8650d39db392..dca66bc44578 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -111,7 +111,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info) * @info: must match the pointer used to register * * Description: This is used to un-register a hardware specific acpi - * driver that manipulates the attention LED. The pointer to the + * driver that manipulates the attention LED. The pointer to the * info struct must be the same as the one used to set it. */ int acpiphp_unregister_attention(struct acpiphp_attention_info *info) @@ -169,8 +169,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) * was registered with us. This allows hardware specific * ACPI implementations to blink the light for us. */ - static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) - { +static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) +{ int retval = -ENODEV; pr_debug("%s - physical_slot = %s\n", __func__, @@ -182,8 +182,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) } else attention_info = NULL; return retval; - } - +} + /** * get_power_status - get power status of a slot @@ -323,7 +323,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot, if (retval) { pr_err("pci_hp_register failed with error %d\n", retval); goto error_hpslot; - } + } pr_info("Slot [%s] registered\n", slot_name(slot)); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 5b4e9eb0e8ff..1cf605f67673 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -325,7 +325,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, list_add_tail(&slot->node, &bridge->slots); - /* Register slots for ejectable funtions only. */ + /* Register slots for ejectable functions only. 
*/ if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) { unsigned long long sun; int retval; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 0d64c414bf78..ecfac7e72d91 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -116,7 +116,7 @@ static struct bin_attribute ibm_apci_table_attr = { .read = ibm_read_apci_table, .write = NULL, }; -static struct acpiphp_attention_info ibm_attention_info = +static struct acpiphp_attention_info ibm_attention_info = { .set_attn = ibm_set_attention_status, .get_attn = ibm_get_attention_status, @@ -171,9 +171,9 @@ ibm_slot_done: */ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) { - union acpi_object args[2]; + union acpi_object args[2]; struct acpi_object_list params = { .pointer = args, .count = 2 }; - acpi_status stat; + acpi_status stat; unsigned long long rc; union apci_descriptor *ibm_slot; @@ -208,7 +208,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) * * Description: This method is registered with the acpiphp module as a * callback to do the device specific task of getting the LED status. - * + * * Because there is no direct method of getting the LED status directly * from an ACPI call, we read the aPCI table and parse out our * slot descriptor to read the status from that. @@ -259,7 +259,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) pr_debug("%s: Received notification %02x\n", __func__, event); if (subevent == 0x80) { - pr_debug("%s: generationg bus event\n", __func__); + pr_debug("%s: generating bus event\n", __func__); acpi_bus_generate_netlink_event(note->device->pnp.device_class, dev_name(¬e->device->dev), note->event, detail); @@ -387,7 +387,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_handle *phandle = (acpi_handle *)context; - acpi_status status; + acpi_status status; struct acpi_device_info *info; int retval = 0; @@ -405,7 +405,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, info->hardware_id.string, handle); *phandle = handle; /* returning non-zero causes the search to stop - * and returns this value to the caller of + * and returns this value to the caller of * acpi_walk_namespace, but it also causes some warnings * in the acpi debug code to print... */ diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 2b4c412f94c3..00c81a3cefc9 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -46,7 +46,7 @@ do { \ if (cpci_debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index d8add34177f2..d3add9819f63 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -39,7 +39,7 @@ extern int cpci_debug; do { \ if (cpci_debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) 
printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index a6a71c41cdf8..7536eef620b0 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c @@ -13,14 +13,14 @@ * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along @@ -53,9 +53,9 @@ #define dbg(format, arg...) \ do { \ - if(debug) \ + if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while(0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c index 449b4bbc8301..e8c4a7ccf578 100644 --- a/drivers/pci/hotplug/cpcihp_zt5550.c +++ b/drivers/pci/hotplug/cpcihp_zt5550.c @@ -13,14 +13,14 @@ * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along @@ -48,9 +48,9 @@ #define dbg(format, arg...) \ do { \ - if(debug) \ + if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while(0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) @@ -285,7 +285,7 @@ static struct pci_device_id zt5550_hc_pci_tbl[] = { { 0, } }; MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl); - + static struct pci_driver zt5550_hc_driver = { .name = "zt5550_hc", .id_table = zt5550_hc_pci_tbl, diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h index bebc6060a558..9a57fda5348c 100644 --- a/drivers/pci/hotplug/cpcihp_zt5550.h +++ b/drivers/pci/hotplug/cpcihp_zt5550.h @@ -13,14 +13,14 @@ * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along @@ -55,7 +55,7 @@ #define HC_CMD_REG 0x0C #define ARB_CONFIG_GNT_REG 0x10 #define ARB_CONFIG_CFG_REG 0x12 -#define ARB_CONFIG_REG 0x10 +#define ARB_CONFIG_REG 0x10 #define ISOL_CONFIG_REG 0x18 #define FAULT_STATUS_REG 0x20 #define FAULT_CONFIG_REG 0x24 diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index c8eaeb43fa5d..31273e155e6c 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -862,10 +862,10 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - /* Check for the proper subsystem ID's + /* Check for the proper subsystem IDs * Intel uses a different SSID programming model than Compaq. 
* For Intel, each SSID bit identifies a PHP capability. - * Also Intel HPC's may have RID=0. + * Also Intel HPCs may have RID=0. */ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) { err(msg_HPC_not_supported); diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index d282019cda5f..11845b796799 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1231,7 +1231,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ /* Only if mode change...*/ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || - ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) + ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); @@ -1828,7 +1828,7 @@ static void interrupt_event_handler(struct controller *ctrl) if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) { dbg("button pressed\n"); - } else if (ctrl->event_queue[loop].event_type == + } else if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) { dbg("button cancel\n"); del_timer(&p_slot->task_event); @@ -2411,11 +2411,11 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func if (rc) return rc; - /* find range of busses to use */ + /* find range of buses to use */ dbg("find ranges of buses to use\n"); bus_node = get_max_resource(&(resources->bus_head), 1); - /* If we don't have any busses to allocate, we can't continue */ + /* If we don't have any buses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; @@ -2900,7 +2900,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's - * alread mapped, set this one the same */ + * already mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 09801c6945ce..6e4a12c91adb 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -291,7 +291,7 @@ int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 s * * Reads configuration for all slots in a PCI bus and saves info. * - * Note: For non-hot plug busses, the slot # saved is the device # + * Note: For non-hot plug buses, the slot # saved is the device # * * returns 0 if success */ @@ -455,7 +455,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug) * cpqhp_save_slot_config * * Saves configuration info for all PCI devices in a given slot - * including subordinate busses. + * including subordinate buses. 
* * returns 0 if success */ @@ -1556,4 +1556,3 @@ void cpqhp_destroy_board_resources (struct pci_func * func) kfree(tres); } } - diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index 8c5b25871d02..e3e46a7b3ee7 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h @@ -59,7 +59,7 @@ extern int ibmphp_debug; /************************************************************ -* RESOURE TYPE * +* RESOURCE TYPE * ************************************************************/ #define EBDA_RSRC_TYPE_MASK 0x03 @@ -103,7 +103,7 @@ extern int ibmphp_debug; //-------------------------------------------------------------- struct rio_table_hdr { - u8 ver_num; + u8 ver_num; u8 scal_count; u8 riodev_count; u16 offset; @@ -127,7 +127,7 @@ struct scal_detail { }; //-------------------------------------------------------------- -// RIO DETAIL +// RIO DETAIL //-------------------------------------------------------------- struct rio_detail { @@ -152,7 +152,7 @@ struct opt_rio { u8 first_slot_num; u8 middle_num; struct list_head opt_rio_list; -}; +}; struct opt_rio_lo { u8 rio_type; @@ -161,7 +161,7 @@ struct opt_rio_lo { u8 middle_num; u8 pack_count; struct list_head opt_rio_lo_list; -}; +}; /**************************************************************** * HPC DESCRIPTOR NODE * @@ -574,7 +574,7 @@ void ibmphp_hpc_stop_poll_thread(void); #define HPC_CTLR_IRQ_PENDG 0x80 //---------------------------------------------------------------------------- -// HPC_CTLR_WROKING status return codes +// HPC_CTLR_WORKING status return codes //---------------------------------------------------------------------------- #define HPC_CTLR_WORKING_NO 0x00 #define HPC_CTLR_WORKING_YES 0x01 diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index cbd72d81d253..efdc13adbe41 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -58,7 +58,7 @@ MODULE_DESCRIPTION (DRIVER_DESC); struct pci_bus *ibmphp_pci_bus; static int max_slots; -static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS +static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS * tables don't provide default info for empty slots */ static int init_flag; @@ -71,20 +71,20 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value) return get_max_adapter_speed_1 (hs, value, 1); } */ -static inline int get_cur_bus_info(struct slot **sl) +static inline int get_cur_bus_info(struct slot **sl) { int rc = 1; struct slot * slot_cur = *sl; debug("options = %x\n", slot_cur->ctrl->options); - debug("revision = %x\n", slot_cur->ctrl->revision); + debug("revision = %x\n", slot_cur->ctrl->revision); - if (READ_BUS_STATUS(slot_cur->ctrl)) + if (READ_BUS_STATUS(slot_cur->ctrl)) rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL); - - if (rc) + + if (rc) return rc; - + slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus); if (READ_BUS_MODE(slot_cur->ctrl)) slot_cur->bus_on->current_bus_mode = @@ -96,7 +96,7 @@ static inline int get_cur_bus_info(struct slot **sl) slot_cur->busstatus, slot_cur->bus_on->current_speed, slot_cur->bus_on->current_bus_mode); - + *sl = slot_cur; return 0; } @@ -104,8 +104,8 @@ static inline int get_cur_bus_info(struct slot **sl) static inline int slot_update(struct slot **sl) { int rc; - rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); - if (rc) + rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); + if (rc) return rc; if (!init_flag) rc = get_cur_bus_info(sl); @@ -172,7 +172,7 
@@ int ibmphp_init_devno(struct slot **cur_slot) debug("(*cur_slot)->irq[3] = %x\n", (*cur_slot)->irq[3]); - debug("rtable->exlusive_irqs = %x\n", + debug("rtable->exclusive_irqs = %x\n", rtable->exclusive_irqs); debug("rtable->slots[loop].irq[0].bitmap = %x\n", rtable->slots[loop].irq[0].bitmap); @@ -271,7 +271,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value) else rc = -ENODEV; } - } else + } else rc = -ENODEV; ibmphp_unlock_operations(); @@ -288,7 +288,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value) debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); - + ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; @@ -406,14 +406,14 @@ static int get_max_bus_speed(struct slot *slot) ibmphp_lock_operations(); mode = slot->supported_bus_mode; - speed = slot->supported_speed; + speed = slot->supported_speed; ibmphp_unlock_operations(); switch (speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: - if (mode == BUS_MODE_PCIX) + if (mode == BUS_MODE_PCIX) speed += 0x01; break; case BUS_SPEED_100: @@ -515,13 +515,13 @@ static int __init init_ops(void) debug("BEFORE GETTING SLOT STATUS, slot # %x\n", slot_cur->number); - if (slot_cur->ctrl->revision == 0xFF) + if (slot_cur->ctrl->revision == 0xFF) if (get_ctrl_revision(slot_cur, &slot_cur->ctrl->revision)) return -1; - if (slot_cur->bus_on->current_speed == 0xFF) - if (get_cur_bus_info(&slot_cur)) + if (slot_cur->bus_on->current_speed == 0xFF) + if (get_cur_bus_info(&slot_cur)) return -1; get_max_bus_speed(slot_cur); @@ -539,8 +539,8 @@ static int __init init_ops(void) debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status)); debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status)); - if ((SLOT_PWRGD(slot_cur->status)) && - !(SLOT_PRESENT(slot_cur->status)) && + if ((SLOT_PWRGD(slot_cur->status)) && + !(SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) { debug("BEFORE POWER OFF COMMAND\n"); rc = power_off(slot_cur); @@ -581,13 +581,13 @@ static int validate(struct slot *slot_cur, int opn) switch (opn) { case ENABLE: - if (!(SLOT_PWRGD(slot_cur->status)) && - (SLOT_PRESENT(slot_cur->status)) && + if (!(SLOT_PWRGD(slot_cur->status)) && + (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; case DISABLE: - if ((SLOT_PWRGD(slot_cur->status)) && + if ((SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; @@ -617,7 +617,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) err("out of system memory\n"); return -ENOMEM; } - + info->power_status = SLOT_PWRGD(slot_cur->status); info->attention_status = SLOT_ATTN(slot_cur->status, slot_cur->ext_status); @@ -638,7 +638,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) case BUS_SPEED_33: break; case BUS_SPEED_66: - if (mode == BUS_MODE_PCIX) + if (mode == BUS_MODE_PCIX) bus_speed += 0x01; else if (mode == BUS_MODE_PCI) ; @@ -654,8 +654,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur) } bus->cur_bus_speed = bus_speed; - // To do: bus_names - + // To do: bus_names + rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); kfree(info); return rc; @@ -729,8 +729,8 @@ static void ibm_unconfigure_device(struct pci_func *func) } /* - * The following function is to fix kernel bug regarding - * getting bus entries, here we manually add those primary + * The following function is to fix kernel bug regarding + * getting bus entries, here we manually 
add those primary * bus entries to kernel bus structure whenever apply */ static u8 bus_structure_fixup(u8 busno) @@ -814,7 +814,7 @@ static int ibm_configure_device(struct pci_func *func) } /******************************************************* - * Returns whether the bus is empty or not + * Returns whether the bus is empty or not *******************************************************/ static int is_bus_empty(struct slot * slot_cur) { @@ -842,7 +842,7 @@ static int is_bus_empty(struct slot * slot_cur) } /*********************************************************** - * If the HPC permits and the bus currently empty, tries to set the + * If the HPC permits and the bus currently empty, tries to set the * bus speed and mode at the maximum card and bus capability * Parameters: slot * Returns: bus is set (0) or error code @@ -856,7 +856,7 @@ static int set_bus(struct slot * slot_cur) static struct pci_device_id ciobx[] = { { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, { }, - }; + }; debug("%s - entry slot # %d\n", __func__, slot_cur->number); if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) { @@ -877,7 +877,7 @@ static int set_bus(struct slot * slot_cur) else if (!SLOT_BUS_MODE(slot_cur->ext_status)) /* if max slot/bus capability is 66 pci and there's no bus mode mismatch, then - the adapter supports 66 pci */ + the adapter supports 66 pci */ cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; @@ -930,7 +930,7 @@ static int set_bus(struct slot * slot_cur) return -EIO; } } - /* This is for x440, once Brandon fixes the firmware, + /* This is for x440, once Brandon fixes the firmware, will not need this delay */ msleep(1000); debug("%s -Exit\n", __func__); @@ -938,9 +938,9 @@ static int set_bus(struct slot * slot_cur) } /* This routine checks the bus limitations that the slot is on from the BIOS. - * This is used in deciding whether or not to power up the slot. + * This is used in deciding whether or not to power up the slot. * (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on - * same bus) + * same bus) * Parameters: slot * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus */ @@ -986,7 +986,7 @@ static int check_limitations(struct slot *slot_cur) static inline void print_card_capability(struct slot *slot_cur) { info("capability of the card is "); - if ((slot_cur->ext_status & CARD_INFO) == PCIX133) + if ((slot_cur->ext_status & CARD_INFO) == PCIX133) info(" 133 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCIX66) info(" 66 MHz PCI-X\n"); @@ -1020,7 +1020,7 @@ static int enable_slot(struct hotplug_slot *hs) } attn_LED_blink(slot_cur); - + rc = set_bus(slot_cur); if (rc) { err("was not able to set the bus\n"); @@ -1082,7 +1082,7 @@ static int enable_slot(struct hotplug_slot *hs) rc = slot_update(&slot_cur); if (rc) goto error_power; - + rc = -EINVAL; if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) { err("power fault occurred trying to power up...\n"); @@ -1093,7 +1093,7 @@ static int enable_slot(struct hotplug_slot *hs) "speed and card capability\n"); print_card_capability(slot_cur); goto error_power; - } + } /* Don't think this case will happen after above checks... 
* but just in case, for paranoia sake */ if (!(SLOT_POWER(slot_cur->status))) { @@ -1144,7 +1144,7 @@ static int enable_slot(struct hotplug_slot *hs) ibmphp_print_test(); rc = ibmphp_update_slot_info(slot_cur); exit: - ibmphp_unlock_operations(); + ibmphp_unlock_operations(); return rc; error_nopower: @@ -1180,7 +1180,7 @@ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; int rc; - + ibmphp_lock_operations(); rc = ibmphp_do_disable_slot(slot); ibmphp_unlock_operations(); @@ -1192,12 +1192,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) int rc; u8 flag; - debug("DISABLING SLOT...\n"); - + debug("DISABLING SLOT...\n"); + if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { return -ENODEV; } - + flag = slot_cur->flag; slot_cur->flag = 1; @@ -1210,7 +1210,7 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) attn_LED_blink(slot_cur); if (slot_cur->func == NULL) { - /* We need this for fncs's that were there on bootup */ + /* We need this for functions that were there on bootup */ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { err("out of system memory\n"); @@ -1222,12 +1222,13 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) } ibm_unconfigure_device(slot_cur->func); - - /* If we got here from latch suddenly opening on operating card or - a power fault, there's no power to the card, so cannot - read from it to determine what resources it occupied. This operation - is forbidden anyhow. The best we can do is remove it from kernel - lists at least */ + + /* + * If we got here from latch suddenly opening on operating card or + * a power fault, there's no power to the card, so cannot + * read from it to determine what resources it occupied. This operation + * is forbidden anyhow. 
The best we can do is remove it from kernel + * lists at least */ if (!flag) { attn_off(slot_cur); @@ -1264,7 +1265,7 @@ error: rc = -EFAULT; goto exit; } - if (flag) + if (flag) ibmphp_update_slot_info(slot_cur); goto exit; } @@ -1339,7 +1340,7 @@ static int __init ibmphp_init(void) debug("AFTER Resource & EBDA INITIALIZATIONS\n"); max_slots = get_max_slots(); - + if ((rc = ibmphp_register_pci())) goto error; diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 9df78bc14541..bd044158b36c 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -123,7 +123,7 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void) static void __init print_bus_info (void) { struct bus_info *ptr; - + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); @@ -131,7 +131,7 @@ static void __init print_bus_info (void) debug ("%s - bus# = %x\n", __func__, ptr->busno); debug ("%s - current_speed = %x\n", __func__, ptr->current_speed); debug ("%s - controller_id = %x\n", __func__, ptr->controller_id); - + debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv); debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv); debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix); @@ -144,7 +144,7 @@ static void __init print_bus_info (void) static void print_lo_info (void) { struct rio_detail *ptr; - debug ("print_lo_info ----\n"); + debug ("print_lo_info ----\n"); list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); @@ -176,7 +176,7 @@ static void __init print_ebda_pci_rsrc (void) struct ebda_pci_rsrc *ptr; list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { - debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", + debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); } } @@ -259,7 +259,7 @@ int __init ibmphp_access_ebda (void) ebda_seg = readw (io_mem); iounmap (io_mem); debug ("returned ebda segment: %x\n", ebda_seg); - + io_mem = ioremap(ebda_seg<<4, 1); if (!io_mem) return -ENOMEM; @@ -310,7 +310,7 @@ int __init ibmphp_access_ebda (void) re = readw (io_mem + sub_addr); /* next sub blk */ sub_addr += 2; - rc_id = readw (io_mem + sub_addr); /* sub blk id */ + rc_id = readw (io_mem + sub_addr); /* sub blk id */ sub_addr += 2; if (rc_id != 0x5243) @@ -330,7 +330,7 @@ int __init ibmphp_access_ebda (void) debug ("info about hpc descriptor---\n"); debug ("hot blk format: %x\n", format); debug ("num of controller: %x\n", num_ctlrs); - debug ("offset of hpc data structure enteries: %x\n ", sub_addr); + debug ("offset of hpc data structure entries: %x\n ", sub_addr); sub_addr = base + re; /* re sub blk */ /* FIXME: rc is never used/checked */ @@ -359,7 +359,7 @@ int __init ibmphp_access_ebda (void) debug ("info about rsrc descriptor---\n"); debug ("format: %x\n", format); debug ("num of rsrc: %x\n", num_entries); - debug ("offset of rsrc data structure enteries: %x\n ", sub_addr); + debug ("offset of rsrc data structure entries: %x\n ", sub_addr); hs_complete = 1; } else { @@ -376,7 +376,7 @@ int __init ibmphp_access_ebda (void) rio_table_ptr->scal_count = readb (io_mem + offset + 1); rio_table_ptr->riodev_count = 
readb (io_mem + offset + 2); rio_table_ptr->offset = offset +3 ; - + debug("info about rio table hdr ---\n"); debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ", rio_table_ptr->ver_num, rio_table_ptr->scal_count, @@ -440,12 +440,12 @@ static int __init ebda_rio_table (void) rio_detail_ptr->chassis_num = readb (io_mem + offset + 14); // debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status); //create linked list of chassis - if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5) + if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5) list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head); - //create linked list of expansion box - else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7) + //create linked list of expansion box + else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7) list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head); - else + else // not in my concern kfree (rio_detail_ptr); offset += 15; @@ -456,7 +456,7 @@ static int __init ebda_rio_table (void) } /* - * reorganizing linked list of chassis + * reorganizing linked list of chassis */ static struct opt_rio *search_opt_vg (u8 chassis_num) { @@ -464,7 +464,7 @@ static struct opt_rio *search_opt_vg (u8 chassis_num) list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { if (ptr->chassis_num == chassis_num) return ptr; - } + } return NULL; } @@ -472,7 +472,7 @@ static int __init combine_wpg_for_chassis (void) { struct opt_rio *opt_rio_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - + list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); if (!opt_rio_ptr) { @@ -484,14 +484,14 @@ static int __init combine_wpg_for_chassis (void) opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num; opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num; list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head); - } else { + } else { opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num); opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num); - } + } } print_opt_vg (); - return 0; -} + return 0; +} /* * reorganizing linked list of expansion box @@ -502,7 +502,7 @@ static struct opt_rio_lo *search_opt_lo (u8 chassis_num) list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { if (ptr->chassis_num == chassis_num) return ptr; - } + } return NULL; } @@ -510,7 +510,7 @@ static int combine_wpg_for_expansion (void) { struct opt_rio_lo *opt_rio_lo_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - + list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); if (!opt_rio_lo_ptr) { @@ -522,22 +522,22 @@ static int combine_wpg_for_expansion (void) opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num; opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num; opt_rio_lo_ptr->pack_count = 1; - + list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head); - } else { + } else { opt_rio_lo_ptr->first_slot_num = min 
(opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num); opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num); opt_rio_lo_ptr->pack_count = 2; - } + } } - return 0; + return 0; } - + /* Since we don't know the max slot number per each chassis, hence go * through the list of all chassis to find out the range - * Arguments: slot_num, 1st slot number of the chassis we think we are on, - * var (0 = chassis, 1 = expansion box) + * Arguments: slot_num, 1st slot number of the chassis we think we are on, + * var (0 = chassis, 1 = expansion box) */ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) { @@ -547,7 +547,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) if (!var) { list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { - if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { + if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { rc = -ENODEV; break; } @@ -569,7 +569,7 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num) list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { //check to see if this slot_num belongs to expansion box - if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) + if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) return opt_lo_ptr; } return NULL; @@ -580,8 +580,8 @@ static struct opt_rio * find_chassis_num (u8 slot_num) struct opt_rio *opt_vg_ptr; list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { - //check to see if this slot_num belongs to chassis - if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) + //check to see if this slot_num belongs to chassis + if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) return opt_vg_ptr; } return NULL; @@ -594,13 +594,13 @@ static u8 calculate_first_slot (u8 slot_num) { u8 first_slot = 1; struct slot * slot_cur; - + list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { if (slot_cur->ctrl) { - if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) + if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) first_slot = slot_cur->ctrl->ending_slot_num; } - } + } return first_slot + 1; } @@ -622,11 +622,11 @@ static char *create_file_name (struct slot * slot_cur) err ("Structure passed is empty\n"); return NULL; } - + slot_num = slot_cur->number; memset (str, 0, sizeof(str)); - + if (rio_table_ptr) { if (rio_table_ptr->ver_num == 3) { opt_vg_ptr = find_chassis_num (slot_num); @@ -660,7 +660,7 @@ static char *create_file_name (struct slot * slot_cur) /* if both NULL and we DO have correct RIO table in BIOS */ return NULL; } - } + } if (!flag) { if (slot_cur->ctrl->ctlr_type == 4) { first_slot = calculate_first_slot (slot_num); @@ -798,7 +798,7 @@ static int __init ebda_rsrc_controller (void) slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num); slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num); - // create bus_info lined list --- if only one slot per bus: slot_min = slot_max + // create bus_info lined list --- if only one slot per bus: slot_min = slot_max bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num); if 
(!bus_info_ptr2) { @@ -814,9 +814,9 @@ static int __init ebda_rsrc_controller (void) bus_info_ptr1->index = bus_index++; bus_info_ptr1->current_speed = 0xff; bus_info_ptr1->current_bus_mode = 0xff; - + bus_info_ptr1->controller_id = hpc_ptr->ctlr_id; - + list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head); } else { @@ -851,7 +851,7 @@ static int __init ebda_rsrc_controller (void) bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv; bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix; bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix; - bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix; + bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix; } bus_ptr++; } @@ -864,7 +864,7 @@ static int __init ebda_rsrc_controller (void) hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1); hpc_ptr->irq = readb (io_mem + addr + 2); addr += 3; - debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n", + debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n", hpc_ptr->u.pci_ctlr.bus, hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq); break; @@ -932,7 +932,7 @@ static int __init ebda_rsrc_controller (void) tmp_slot->supported_speed = 2; else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX) tmp_slot->supported_speed = 1; - + if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP) tmp_slot->supported_bus_mode = 1; else @@ -1000,7 +1000,7 @@ error_no_hpc: return rc; } -/* +/* * map info (bus, devfun, start addr, end addr..) of i/o, memory, * pfm from the physical addr to a list of resource. */ @@ -1057,7 +1057,7 @@ static int __init ebda_rsrc_rsrc (void) addr += 10; debug ("rsrc from mem or pfm ---\n"); - debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", + debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr); list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head); @@ -1096,7 +1096,7 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num) struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { - if (ptr->busno == num) + if (ptr->busno == num) return ptr; } return NULL; @@ -1110,7 +1110,7 @@ int ibmphp_get_bus_index (u8 num) struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { - if (ptr->busno == num) + if (ptr->busno == num) return ptr->index; } return -ENODEV; @@ -1168,7 +1168,7 @@ static struct pci_device_id id_table[] = { .subdevice = HPC_SUBSYSTEM_ID, .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), }, {} -}; +}; MODULE_DEVICE_TABLE(pci, id_table); @@ -1197,7 +1197,7 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) struct controller *ctrl; debug ("inside ibmphp_probe\n"); - + list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { @@ -1210,4 +1210,3 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) } return -ENODEV; } - diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index f59ed30512b5..5fc7a089f532 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c @@ -258,7 +258,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 { u8 rc; void __iomem *wpg_addr; // base addr + offset - unsigned long wpg_data; // data 
to/from WPG LOHI format + unsigned long wpg_data; // data to/from WPG LOHI format unsigned long ultemp; unsigned long data; // actual data HILO format int i; @@ -351,7 +351,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 } //------------------------------------------------------------ -// Read from ISA type HPC +// Read from ISA type HPC //------------------------------------------------------------ static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset) { @@ -372,7 +372,7 @@ static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data) { u16 start_address; u16 port_address; - + start_address = ctlr_ptr->u.isa_ctlr.io_start; port_address = start_address + (u16) offset; outb (data, port_address); @@ -656,11 +656,11 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus) //-------------------------------------------------------------------- // cleanup //-------------------------------------------------------------------- - + // remove physical to logical address mapping if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) iounmap (wpg_bbar); - + free_hpc_access (); debug_polling ("%s - Exit rc[%d]\n", __func__, rc); @@ -835,7 +835,7 @@ static int poll_hpc(void *data) down (&semOperations); switch (poll_state) { - case POLL_LATCH_REGISTER: + case POLL_LATCH_REGISTER: oldlatchlow = curlatchlow; ctrl_count = 0x00; list_for_each (pslotlist, &ibmphp_slot_head) { @@ -892,16 +892,16 @@ static int poll_hpc(void *data) if (kthread_should_stop()) goto out_sleep; - + down (&semOperations); - + if (poll_count >= POLL_LATCH_CNT) { poll_count = 0; poll_state = POLL_SLOTS; } else poll_state = POLL_LATCH_REGISTER; break; - } + } /* give up the hardware semaphore */ up (&semOperations); /* sleep for a short time just for good measure */ @@ -958,7 +958,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) // bit 5 - HPC_SLOT_PWRGD if ((pslot->status & 0x20) != (poldslot->status & 0x20)) // OFF -> ON: ignore, ON -> OFF: disable slot - if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) + if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) disable = 1; // bit 6 - HPC_SLOT_BUS_SPEED @@ -980,7 +980,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) pslot->status &= ~HPC_SLOT_POWER; } } - // CLOSE -> OPEN + // CLOSE -> OPEN else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) { disable = 1; @@ -1075,7 +1075,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void) debug ("before locking operations \n"); ibmphp_lock_operations (); debug ("after locking operations \n"); - + // wait for poll thread to exit debug ("before sem_exit down \n"); down (&sem_exit); diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c index c60f5f3e838d..639ea3a75e14 100644 --- a/drivers/pci/hotplug/ibmphp_pci.c +++ b/drivers/pci/hotplug/ibmphp_pci.c @@ -1,8 +1,8 @@ /* * IBM Hot Plug Controller Driver - * + * * Written By: Irene Zubarev, IBM Corporation - * + * * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001,2002 IBM Corp. * @@ -42,7 +42,7 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno); /* * NOTE..... 
If BIOS doesn't provide default routing, we assign: - * 9 for SCSI, 10 for LAN adapters, and 11 for everything else. + * 9 for SCSI, 10 for LAN adapters, and 11 for everything else. * If adapter is bridged, then we assign 11 to it and devices behind it. * We also assign the same irq numbers for multi function devices. * These are PIC mode, so shouldn't matter n.e.ways (hopefully) @@ -71,11 +71,11 @@ static void assign_alt_irq (struct pci_func * cur_func, u8 class_code) * Configures the device to be added (will allocate needed resources if it * can), the device can be a bridge or a regular pci device, can also be * multi-functional - * + * * Input: function to be added - * + * * TO DO: The error case with Multifunction device or multi function bridge, - * if there is an error, will need to go through all previous functions and + * if there is an error, will need to go through all previous functions and * unconfigure....or can add some code into unconfigure_card.... */ int ibmphp_configure_card (struct pci_func *func, u8 slotno) @@ -98,7 +98,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cur_func = func; /* We only get bus and device from IRQ routing table. So at this point, - * func->busno is correct, and func->device contains only device (at the 5 + * func->busno is correct, and func->device contains only device (at the 5 * highest bits) */ @@ -151,7 +151,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cur_func->device, cur_func->busno); cleanup_count = 6; goto error; - } + } cur_func->next = NULL; function = 0x8; break; @@ -339,7 +339,7 @@ error: } /* - * This function configures the pci BARs of a single device. + * This function configures the pci BARs of a single device. * Input: pointer to the pci_func * Output: configured PCI, 0, or error */ @@ -371,17 +371,17 @@ static int configure_device (struct pci_func *func) for (count = 0; address[count]; count++) { /* for 6 BARs */ - /* not sure if i need this. per scott, said maybe need smth like this + /* not sure if i need this. per scott, said maybe need * something like this if devices don't adhere 100% to the spec, so don't want to write to the reserved bits - pcibios_read_config_byte(cur_func->busno, cur_func->device, + pcibios_read_config_byte(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, &tmp); if (tmp & 0x01) // IO - pcibios_write_config_dword(cur_func->busno, cur_func->device, + pcibios_write_config_dword(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD); else // Memory - pcibios_write_config_dword(cur_func->busno, cur_func->device, + pcibios_write_config_dword(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF); */ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); @@ -421,8 +421,8 @@ static int configure_device (struct pci_func *func) return -EIO; } pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start); - - /* _______________This is for debugging purposes only_____________________ */ + + /* _______________This is for debugging purposes only_____________________ */ debug ("b4 writing, the IO address is %x\n", func->io[count]->start); pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]); debug ("after writing.... 
the start address is %x\n", bar[count]); @@ -484,7 +484,7 @@ static int configure_device (struct pci_func *func) pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); - /*_______________This is for debugging purposes only______________________________*/ + /*_______________This is for debugging purposes only______________________________*/ debug ("b4 writing, start address is %x\n", func->pfmem[count]->start); pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]); debug ("after writing, start address is %x\n", bar[count]); @@ -559,7 +559,7 @@ static int configure_device (struct pci_func *func) /****************************************************************************** * This routine configures a PCI-2-PCI bridge and the functions behind it * Parameters: pci_func - * Returns: + * Returns: ******************************************************************************/ static int configure_bridge (struct pci_func **func_passed, u8 slotno) { @@ -622,7 +622,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno); pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number); - + /* __________________For debugging purposes only __________________________________ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); debug ("sec_number after write/read is %x\n", sec_number); @@ -644,7 +644,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!! + !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!! 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/ @@ -670,7 +670,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("len[count] in IO = %x\n", len[count]); bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); - + if (!bus_io[count]) { err ("out of system memory\n"); retval = -ENOMEM; @@ -735,7 +735,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) ibmphp_add_pfmem_from_mem (bus_pfmem[count]); func->pfmem[count] = bus_pfmem[count]; } else { - err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n", + err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree (mem_tmp); kfree (bus_pfmem[count]); @@ -805,7 +805,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("amount_needed->mem = %x\n", amount_needed->mem); debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem); - if (amount_needed->not_correct) { + if (amount_needed->not_correct) { debug ("amount_needed is not correct\n"); for (count = 0; address[count]; count++) { /* for 2 BARs */ @@ -830,7 +830,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) } else { debug ("it wants %x IO behind the bridge\n", amount_needed->io); io = kzalloc(sizeof(*io), GFP_KERNEL); - + if (!io) { err ("out of system memory\n"); retval = -ENOMEM; @@ -959,7 +959,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if (bus->noIORanges) { pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8); - pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8); + pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8); /* _______________This is for debugging purposes only ____________________ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp); @@ -980,7 +980,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if (bus->noMemRanges) { pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16); pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16); - + /* ____________________This is for debugging purposes only ________________________ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp); debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); @@ -1017,7 +1017,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq); if ((irq > 0x00) && (irq < 0x05)) pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]); - /* + /* pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl); pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY); pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR); @@ -1071,7 +1071,7 @@ error: * This function adds up the amount of resources needed behind the PPB bridge * and passes it to the configure_bridge function * Input: bridge function - * Ouput: amount of resources needed + * Output: amount of resources needed *****************************************************************************/ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) { @@ -1204,9 +1204,9 @@ static struct 
res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) return amount; } -/* The following 3 unconfigure_boot_ routines deal with the case when we had the card - * upon bootup in the system, since we don't allocate func to such case, we need to read - * the start addresses from pci config space and then find the corresponding entries in +/* The following 3 unconfigure_boot_ routines deal with the case when we had the card + * upon bootup in the system, since we don't allocate func to such case, we need to read + * the start addresses from pci config space and then find the corresponding entries in * our resource lists. The functions return either 0, -ENODEV, or -1 (general failure) * Change: we also call these functions even if we configured the card ourselves (i.e., not * the bootup case), since it should work same way @@ -1561,8 +1561,8 @@ static int unconfigure_boot_card (struct slot *slot_cur) * unconfiguring the device * TO DO: will probably need to add some code in case there was some resource, * to remove it... this is from when we have errors in the configure_card... - * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!! - * Returns: 0, -1, -ENODEV + * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!! + * Returns: 0, -1, -ENODEV */ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end) { @@ -1634,7 +1634,7 @@ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end) * Input: bus and the amount of resources needed (we know we can assign those, * since they've been checked already * Output: bus added to the correct spot - * 0, -1, error + * 0, -1, error */ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno) { @@ -1650,7 +1650,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r err ("strange, cannot find bus which is supposed to be at the system... 
something is terribly wrong...\n"); return -ENODEV; } - + list_add (&bus->bus_list, &cur_bus->bus_list); } if (io) { @@ -1679,7 +1679,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r } if (pfmem) { pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL); - if (!pfmem_range) { + if (!pfmem_range) { err ("out of system memory\n"); return -ENOMEM; } @@ -1726,4 +1726,3 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno) return busno; return 0xff; } - diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index e2dc289f767c..a265acb2d518 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -72,7 +72,7 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8 static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr) { struct resource_node *rs; - + if (!curr) { err ("NULL passed to allocate\n"); return NULL; @@ -128,7 +128,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node } newrange->start = curr->start_addr; newrange->end = curr->end_addr; - + if (first_bus || (!num_ranges)) newrange->rangeno = 1; else { @@ -162,7 +162,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node newbus->rangePFMem = newrange; if (first_bus) newbus->noPFMemRanges = 1; - else { + else { debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noPFMemRanges; fix_resources (newbus); @@ -190,7 +190,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node * This is the Resource Management initialization function. It will go through * the Resource list taken from EBDA and fill in this module's data structures * - * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, + * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, * SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW * * Input: ptr to the head of the resource list from EBDA @@ -382,7 +382,7 @@ int __init ibmphp_rsrc_init (void) * pci devices' resources for the appropriate resource * * Input: type of the resource, range to add, current bus - * Output: 0 or -1, bus and range ptrs + * Output: 0 or -1, bus and range ptrs ********************************************************************************/ static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur) { @@ -466,7 +466,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno) switch (type) { case MEM: - if (bus_cur->firstMem) + if (bus_cur->firstMem) res = bus_cur->firstMem; break; case PFMEM: @@ -583,7 +583,7 @@ static void fix_resources (struct bus_node *bus_cur) } /******************************************************************************* - * This routine adds a resource to the list of resources to the appropriate bus + * This routine adds a resource to the list of resources to the appropriate bus * based on their resource type and sorted by their starting addresses. It assigns * the ptrs to next and nextRange if needed. * @@ -605,11 +605,11 @@ int ibmphp_add_resource (struct resource_node *res) err ("NULL passed to add\n"); return -ENODEV; } - + bus_cur = find_bus_wprev (res->busno, NULL, 0); - + if (!bus_cur) { - /* didn't find a bus, smth's wrong!!! */ + /* didn't find a bus, something's wrong!!! 
*/ debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -ENODEV; } @@ -648,7 +648,7 @@ int ibmphp_add_resource (struct resource_node *res) if (!range_cur) { switch (res->type) { case IO: - ++bus_cur->needIOUpdate; + ++bus_cur->needIOUpdate; break; case MEM: ++bus_cur->needMemUpdate; @@ -659,13 +659,13 @@ int ibmphp_add_resource (struct resource_node *res) } res->rangeno = -1; } - + debug ("The range is %d\n", res->rangeno); if (!res_start) { /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */ switch (res->type) { case IO: - bus_cur->firstIO = res; + bus_cur->firstIO = res; break; case MEM: bus_cur->firstMem = res; @@ -673,7 +673,7 @@ int ibmphp_add_resource (struct resource_node *res) case PFMEM: bus_cur->firstPFMem = res; break; - } + } res->next = NULL; res->nextRange = NULL; } else { @@ -770,7 +770,7 @@ int ibmphp_add_resource (struct resource_node *res) * This routine will remove the resource from the list of resources * * Input: io, mem, and/or pfmem resource to be deleted - * Ouput: modified resource list + * Output: modified resource list * 0 or error code ****************************************************************************/ int ibmphp_remove_resource (struct resource_node *res) @@ -825,7 +825,7 @@ int ibmphp_remove_resource (struct resource_node *res) if (!res_cur) { if (res->type == PFMEM) { - /* + /* * case where pfmem might be in the PFMemFromMem list * so will also need to remove the corresponding mem * entry @@ -961,12 +961,12 @@ static struct range_node * find_range (struct bus_node *bus_cur, struct resource } /***************************************************************************** - * This routine will check to make sure the io/mem/pfmem->len that the device asked for + * This routine will check to make sure the io/mem/pfmem->len that the device asked for * can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL, * otherwise, returns 0 * * Input: resource - * Ouput: the correct start and end address are inputted into the resource node, + * Output: the correct start and end address are inputted into the resource node, * 0 or -EINVAL *****************************************************************************/ int ibmphp_check_resource (struct resource_node *res, u8 bridge) @@ -996,7 +996,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) bus_cur = find_bus_wprev (res->busno, NULL, 0); if (!bus_cur) { - /* didn't find a bus, smth's wrong!!! */ + /* didn't find a bus, something's wrong!!! 
*/ debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -EINVAL; } @@ -1066,7 +1066,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) break; } } - + if (flag && len_cur == res->len) { debug ("but we are not here, right?\n"); res->start = start_cur; @@ -1118,10 +1118,10 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if (res_prev) { if (res_prev->rangeno != res_cur->rangeno) { /* 1st device on this range */ - if ((res_cur->start != range->start) && + if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { if ((len_tmp < len_cur) || (len_cur == 0)) { - if ((range->start % tmp_divide) == 0) { + if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; @@ -1344,7 +1344,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) * This routine is called from remove_card if the card contained PPB. * It will remove all the resources on the bus as well as the bus itself * Input: Bus - * Ouput: 0, -ENODEV + * Output: 0, -ENODEV ********************************************************************************/ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) { @@ -1353,7 +1353,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) struct bus_node *prev_bus; int rc; - prev_bus = find_bus_wprev (parent_busno, NULL, 0); + prev_bus = find_bus_wprev (parent_busno, NULL, 0); if (!prev_bus) { debug ("something terribly wrong. Cannot find parent bus to the one to remove\n"); @@ -1424,7 +1424,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) } /****************************************************************************** - * This routine deletes the ranges from a given bus, and the entries from the + * This routine deletes the ranges from a given bus, and the entries from the * parent's bus in the resources * Input: current bus, previous bus * Output: 0, -EINVAL @@ -1453,7 +1453,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) if (bus_cur->noMemRanges) { range_cur = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { - if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0) + if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0) return -EINVAL; ibmphp_remove_resource (res); @@ -1467,7 +1467,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) if (bus_cur->noPFMemRanges) { range_cur = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { - if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0) + if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0) return -EINVAL; ibmphp_remove_resource (res); @@ -1482,7 +1482,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) } /* - * find the resource node in the bus + * find the resource node in the bus * Input: Resource needed, start address of the resource, type of resource */ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag) @@ -1512,7 +1512,7 @@ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resour err ("wrong type of flag\n"); return -EINVAL; } - + while (res_cur) { if (res_cur->start == start_address) { *res = res_cur; @@ -1718,7 +1718,7 @@ static int __init once_over (void) } /* end for pfmem */ } /* end if */ } /* end list_for_each bus */ - return 0; + return 
0; } int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem) @@ -1760,9 +1760,9 @@ static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u list_for_each (tmp, &gbuses) { tmp_prev = tmp->prev; bus_cur = list_entry (tmp, struct bus_node, bus_list); - if (flag) + if (flag) *prev = list_entry (tmp_prev, struct bus_node, bus_list); - if (bus_cur->busno == bus_number) + if (bus_cur->busno == bus_number) return bus_cur; } @@ -1776,7 +1776,7 @@ void ibmphp_print_test (void) struct range_node *range; struct resource_node *res; struct list_head *tmp; - + debug_pci ("*****************START**********************\n"); if ((!list_empty(&gbuses)) && flags) { @@ -1906,7 +1906,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu return 1; range_cur = range_cur->next; } - + return 0; } @@ -1920,7 +1920,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu * Returns: none * Note: this function doesn't take into account IO restrictions etc, * so will only work for bridges with no video/ISA devices behind them It - * also will not work for onboard PPB's that can have more than 1 *bus + * also will not work for onboard PPBs that can have more than 1 *bus * behind them All these are TO DO. * Also need to add more error checkings... (from fnc returns etc) */ @@ -1963,7 +1963,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) case PCI_HEADER_TYPE_BRIDGE: function = 0x8; case PCI_HEADER_TYPE_MULTIBRIDGE: - /* We assume here that only 1 bus behind the bridge + /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: temp = secondary; while (temp < subordinate) { @@ -1972,7 +1972,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) } */ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno); - bus_sec = find_bus_wprev (sec_busno, NULL, 0); + bus_sec = find_bus_wprev (sec_busno, NULL, 0); /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */ if (!bus_sec) { bus_sec = alloc_error_bus (NULL, sec_busno, 1); @@ -2028,7 +2028,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) io->len = io->end - io->start + 1; ibmphp_add_resource (io); } - } + } pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address); pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address); diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index ec20f74c8981..cfa92a984e62 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -131,7 +131,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf, } module_put(slot->ops->owner); -exit: +exit: if (retval) return retval; return count; @@ -177,7 +177,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf, retval = ops->set_attention_status(slot->hotplug, attention); module_put(ops->owner); -exit: +exit: if (retval) return retval; return count; @@ -247,7 +247,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, retval = slot->ops->hardware_test(slot, test); module_put(slot->ops->owner); -exit: +exit: if (retval) return retval; return count; @@ -512,7 +512,7 @@ int pci_hp_deregister(struct hotplug_slot *hotplug) * @hotplug: pointer to the slot whose info has changed * @info: pointer to the info copy into the slot's info structure * - * @slot must have been 
registered with the pci + * @slot must have been registered with the pci * hotplug subsystem previously with a call to pci_hp_register(). * * Returns 0 if successful, anything else for an error. diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 541bbe6d5343..21e865ded1dc 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -180,5 +180,5 @@ static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev) { return 0; } -#endif /* CONFIG_ACPI */ +#endif /* CONFIG_ACPI */ #endif /* _PCIEHP_H */ diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index ead7c534095e..eddddd447d0d 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c @@ -54,7 +54,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev) { if (slot_detection_mode != PCIEHP_DETECT_ACPI) return 0; - if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev))) + if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev))) return 0; return -ENODEV; } @@ -78,7 +78,7 @@ static int __initdata dup_slot_id; static int __initdata acpi_slot_detected; static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); -/* Dummy driver for dumplicate name detection */ +/* Dummy driver for duplicate name detection */ static int __init dummy_probe(struct pcie_device *dev) { u32 slot_cap; @@ -96,7 +96,7 @@ static int __init dummy_probe(struct pcie_device *dev) dup_slot_id++; } list_add_tail(&slot->list, &dummy_slots); - handle = DEVICE_ACPI_HANDLE(&pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) acpi_slot_detected = 1; return -ENODEV; /* dummy driver always returns error */ diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index f4a18f51a29c..bbd48bbe4e9b 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -351,8 +351,8 @@ static int __init pcied_init(void) pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); - dbg("pcie_port_service_register = %d\n", retval); - info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); + dbg("pcie_port_service_register = %d\n", retval); + info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) dbg("Failure to register service\n"); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 51f56ef4ab6f..3eea3fdd4b0b 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -92,7 +92,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec) { /* Clamp to sane value */ if ((sec <= 0) || (sec > 60)) - sec = 2; + sec = 2; ctrl->poll_timer.function = &int_poll_timeout; ctrl->poll_timer.data = (unsigned long)ctrl; @@ -194,7 +194,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n"); } else if (!NO_CMD_CMPL(ctrl)) { /* - * This controller semms to notify of command completed + * This controller seems to notify of command completed * event even though it supports none of power * controller, attention led, power led and EMI. 
*/ @@ -926,7 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev) if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) goto abort_ctrl; - /* Disable sotfware notification */ + /* Disable software notification */ pcie_disable_notification(ctrl); ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c index 1f00b937f721..ac69094e4b20 100644 --- a/drivers/pci/hotplug/pcihp_skeleton.c +++ b/drivers/pci/hotplug/pcihp_skeleton.c @@ -52,7 +52,7 @@ static LIST_HEAD(slot_list); do { \ if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) @@ -287,7 +287,7 @@ static int __init init_slots(void) hotplug_slot->release = &release_slot; make_slot_name(slot); hotplug_slot->ops = &skel_hotplug_slot_ops; - + /* * Initialize the slot info structure with some known * good values. @@ -296,7 +296,7 @@ static int __init init_slots(void) get_attention_status(hotplug_slot, &info->attention_status); get_latch_status(hotplug_slot, &info->latch_status); get_adapter_status(hotplug_slot, &info->adapter_status); - + dbg("registering slot %d\n", i); retval = pci_hp_register(slot->hotplug_slot); if (retval) { @@ -336,7 +336,7 @@ static void __exit cleanup_slots(void) pci_hp_deregister(slot->hotplug_slot); } } - + static int __init pcihp_skel_init(void) { int retval; diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index bb7af78e4eed..e9c044d15add 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -217,7 +217,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn) if (!pcibios_find_pci_bus(dn)) return -EINVAL; - /* If pci slot is hotplugable, use hotplug to remove it */ + /* If pci slot is hotpluggable, use hotplug to remove it */ slot = find_php_slot(dn); if (slot && rpaphp_deregister_slot(slot)) { printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h index 3135856e5e1c..b2593e876a09 100644 --- a/drivers/pci/hotplug/rpaphp.h +++ b/drivers/pci/hotplug/rpaphp.h @@ -49,9 +49,9 @@ extern bool rpaphp_debug; #define dbg(format, arg...) \ do { \ - if (rpaphp_debug) \ + if (rpaphp_debug) \ printk(KERN_DEBUG "%s: " format, \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) #define info(format, arg...) 
printk(KERN_INFO "%s: " format, MY_NAME , ## arg) @@ -99,5 +99,5 @@ void dealloc_slot_struct(struct slot *slot); struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain); int rpaphp_register_slot(struct slot *slot); int rpaphp_deregister_slot(struct slot *slot); - + #endif /* _PPC64PHP_H */ diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 127d6e600185..b7fc5c9255a5 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -226,7 +226,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index, for (i = 0; i < indexes[0]; i++) { if ((unsigned int) indexes[i + 1] == *my_index) { if (drc_name) - *drc_name = name_tmp; + *drc_name = name_tmp; if (drc_type) *drc_type = type_tmp; if (drc_index) @@ -289,7 +289,7 @@ static int is_php_dn(struct device_node *dn, const int **indexes, * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem. * @dn: device node of slot * - * This subroutine will register a hotplugable slot with the + * This subroutine will register a hotpluggable slot with the * PCI hotplug infrastructure. This routine is typically called * during boot time, if the hotplug slots are present at boot time, * or is called later, by the dlpar add code, if the slot is @@ -328,7 +328,7 @@ int rpaphp_add_slot(struct device_node *dn) return -ENOMEM; slot->type = simple_strtoul(type, NULL, 10); - + dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n", indexes[i + 1], name, type); @@ -356,7 +356,7 @@ static void __exit cleanup_slots(void) /* * Unregister all of our slots with the pci_hotplug subsystem, * and free up all memory that we had allocated. - * memory will be freed in release_slot callback. + * memory will be freed in release_slot callback. */ list_for_each_safe(tmp, n, &rpaphp_slot_head) { diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index 513e1e282391..9243f3e7a1c9 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -44,7 +44,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state) dbg("%s: slot must be power up to get sensor-state\n", __func__); - /* some slots have to be powered up + /* some slots have to be powered up * before get-sensor will succeed. */ rc = rtas_set_power_level(slot->power_domain, POWER_ON, @@ -133,4 +133,3 @@ int rpaphp_enable_slot(struct slot *slot) return 0; } - diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index b283bbea6d24..a6082cc263f7 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -1,5 +1,5 @@ /* - * RPA Virtual I/O device functions + * RPA Virtual I/O device functions * Copyright (C) 2004 Linda Xie <lxie@us.ibm.com> * * All rights reserved. 
@@ -51,27 +51,27 @@ struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain) { struct slot *slot; - + slot = kzalloc(sizeof(struct slot), GFP_KERNEL); if (!slot) goto error_nomem; slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot->hotplug_slot) - goto error_slot; + goto error_slot; slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!slot->hotplug_slot->info) goto error_hpslot; slot->name = kstrdup(drc_name, GFP_KERNEL); if (!slot->name) - goto error_info; + goto error_info; slot->dn = dn; slot->index = drc_index; slot->power_domain = power_domain; slot->hotplug_slot->private = slot; slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops; slot->hotplug_slot->release = &rpaphp_release_slot; - + return (slot); error_info: @@ -91,7 +91,7 @@ static int is_registered(struct slot *slot) list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) { if (!strcmp(tmp_slot->name, slot->name)) return 1; - } + } return 0; } @@ -104,7 +104,7 @@ int rpaphp_deregister_slot(struct slot *slot) __func__, slot->name); list_del(&slot->rpaphp_slot_list); - + retval = pci_hp_deregister(php_slot); if (retval) err("Problem unregistering a slot %s\n", slot->name); @@ -120,7 +120,7 @@ int rpaphp_register_slot(struct slot *slot) int retval; int slotno; - dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", + dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn->full_name, slot->index, slot->name, slot->power_domain, slot->type); @@ -128,7 +128,7 @@ int rpaphp_register_slot(struct slot *slot) if (is_registered(slot)) { err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name); return -EAGAIN; - } + } if (slot->dn->child) slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); @@ -145,4 +145,3 @@ int rpaphp_register_slot(struct slot *slot) info("Slot [%s] registered\n", slot->name); return 0; } - diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index b2781dfe60e9..5b05a68cca6c 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -9,6 +9,7 @@ * Work to add BIOS PROM support was completed by Mike Habeck. 
*/ +#include <linux/acpi.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> @@ -29,7 +30,6 @@ #include <asm/sn/sn_feature_sets.h> #include <asm/sn/sn_sal.h> #include <asm/sn/types.h> -#include <linux/acpi.h> #include <asm/sn/acpi.h> #include "../pci.h" @@ -414,7 +414,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) acpi_handle rethandle; acpi_status ret; - phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; + phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); if (acpi_bus_get_device(phandle, &pdevice)) { dev_dbg(&slot->pci_bus->self->dev, @@ -495,7 +495,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) /* free the ACPI resources for the slot */ if (SN_ACPI_BASE_SUPPORT() && - PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { + PCI_CONTROLLER(slot->pci_bus)->companion) { unsigned long long adr; struct acpi_device *device; acpi_handle phandle; @@ -504,7 +504,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) acpi_status ret; /* Get the rootbus node pointer */ - phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle; + phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion); acpi_scan_lock_acquire(); /* diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index d876e4b3c6a9..61529097464d 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -216,13 +216,13 @@ struct ctrl_reg { /* offsets to the controller registers based on the above structure layout */ enum ctrl_offsets { - BASE_OFFSET = offsetof(struct ctrl_reg, base_offset), - SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1), + BASE_OFFSET = offsetof(struct ctrl_reg, base_offset), + SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1), SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2), - SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config), + SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config), SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config), MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl), - PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface), + PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface), CMD = offsetof(struct ctrl_reg, cmd), CMD_STATUS = offsetof(struct ctrl_reg, cmd_status), INTR_LOC = offsetof(struct ctrl_reg, intr_loc), diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index d3f757df691c..faf13abd5b99 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -143,11 +143,11 @@ static int init_slots(struct controller *ctrl) snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); hotplug_slot->ops = &shpchp_hotplug_slot_ops; - ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " - "hp_slot=%x sun=%x slot_device_offset=%x\n", - pci_domain_nr(ctrl->pci_dev->subordinate), - slot->bus, slot->device, slot->hp_slot, slot->number, - ctrl->slot_device_offset); + ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " + "hp_slot=%x sun=%x slot_device_offset=%x\n", + pci_domain_nr(ctrl->pci_dev->subordinate), + slot->bus, slot->device, slot->hp_slot, slot->number, + ctrl->slot_device_offset); retval = pci_hp_register(slot->hotplug_slot, ctrl->pci_dev->subordinate, slot->device, name); if (retval) { diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 75ba2311b54f..2d7f474ca0ec 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -116,7 +116,7 @@ #define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21)) /* - * SHPC 
Command Code definitnions + * SHPC Command Code definitions * * Slot Operation 00h - 3Fh * Set Bus Segment Speed/Mode A 40h - 47h diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 1b90579b233a..50ce68098298 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c @@ -37,7 +37,7 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent) char *type; struct resource *res; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle) return -EINVAL; diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 21a7182dccd4..1fe2d6fb19d5 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -610,7 +610,7 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) struct resource tmp; enum pci_bar_type type; int reg = pci_iov_resource_bar(dev, resno, &type); - + if (!reg) return 0; diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index b008cf86b9c3..6684f153ab57 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -25,7 +25,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) /** * pci_lost_interrupt - reports a lost PCI interrupt * @pdev: device whose interrupt is lost - * + * * The primary function of this routine is to report a lost interrupt * in a standard way which users can recognise (instead of blaming the * driver). diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 5e63645a7abe..3fcd67a16677 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -784,7 +784,7 @@ error: * @nvec: how many MSIs have been requested ? * @type: are we checking for MSI or MSI-X ? * - * Look at global flags, the device itself, and its parent busses + * Look at global flags, the device itself, and its parent buses * to determine if MSI/-X are supported for the device. If MSI/-X is * supported return 0, else return an error code. **/ diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dfd1f59de729..577074efbe62 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -141,7 +141,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) * if (_PRW at S-state x) * choose from highest power _SxD to lowest power _SxW * else // no _PRW at S-state x - * choose highest power _SxD or any lower power + * choose highest power _SxD or any lower power */ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) @@ -173,14 +173,14 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) static bool acpi_pci_power_manageable(struct pci_dev *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); return handle ? acpi_bus_power_manageable(handle) : false; } static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); static const u8 state_conv[] = { [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, @@ -217,7 +217,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) static bool acpi_pci_can_wakeup(struct pci_dev *dev) { - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); return handle ? 
acpi_bus_can_wakeup(handle) : false; } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 454853507b7e..25f0bc659164 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -19,6 +19,7 @@ #include <linux/cpu.h> #include <linux/pm_runtime.h> #include <linux/suspend.h> +#include <linux/kexec.h> #include "pci.h" struct pci_dynid { @@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, int error, node; struct drv_dev_and_id ddi = { drv, dev, id }; - /* Execute driver initialization on node where the device's - bus is attached to. This way the driver likely allocates - its local memory on the right node without any need to - change it. */ + /* + * Execute driver initialization on node where the device is + * attached. This way the driver likely allocates its local memory + * on the right node. + */ node = dev_to_node(&dev->dev); - if (node >= 0) { + + /* + * On NUMA systems, we are likely to call a PF probe function using + * work_on_cpu(). If that probe calls pci_enable_sriov() (which + * adds the VF devices via pci_bus_add_device()), we may re-enter + * this function to call the VF probe function. Calling + * work_on_cpu() again will cause a lockdep warning. Since VFs are + * always on the same node as the PF, we can work around this by + * avoiding work_on_cpu() when we're already on the correct node. + * + * Preemption is enabled, so it's theoretically unsafe to use + * numa_node_id(), but even if we run the probe function on the + * wrong node, it should be functionally correct. + */ + if (node >= 0 && node != numa_node_id()) { int cpu; get_online_cpus(); @@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, put_online_cpus(); } else error = local_pci_probe(&ddi); + return error; } @@ -312,7 +329,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device * @pci_dev: PCI device being probed - * + * * returns 0 on success, else error. * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. */ @@ -378,7 +395,7 @@ static int pci_device_remove(struct device * dev) * We would love to complain here if pci_dev->is_enabled is set, that * the driver should have called pci_disable_device(), but the * unfortunate fact is there are too many odd BIOS and bridge setups - * that don't like drivers doing that all of the time. + * that don't like drivers doing that all of the time. * Oh well, we can dream of sane hardware when we sleep, no matter how * horrible the crap we have to deal with is when we are awake... */ @@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev) pci_msi_shutdown(pci_dev); pci_msix_shutdown(pci_dev); +#ifdef CONFIG_KEXEC /* - * Turn off Bus Master bit on the device to tell it to not - * continue to do DMA. Don't touch devices in D3cold or unknown states. + * If this is a kexec reboot, turn off Bus Master bit on the + * device to tell it to not continue to do DMA. Don't touch + * devices in D3cold or unknown states. + * If it is not a kexec reboot, firmware will hit the PCI + * devices with big hammer and stop their DMA any way. 
*/ - if (pci_dev->current_state <= PCI_D3hot) + if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot)) pci_clear_master(pci_dev); +#endif } #ifdef CONFIG_PM @@ -1156,10 +1178,10 @@ static const struct dev_pm_ops pci_dev_pm_ops = { * @drv: the driver structure to register * @owner: owner module of drv * @mod_name: module name string - * + * * Adds the driver structure to the list of registered drivers. - * Returns a negative value on error, otherwise 0. - * If no error occurred, the driver remains registered even if + * Returns a negative value on error, otherwise 0. + * If no error occurred, the driver remains registered even if * no device was claimed during registration. */ int __pci_register_driver(struct pci_driver *drv, struct module *owner, @@ -1181,7 +1203,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, /** * pci_unregister_driver - unregister a pci driver * @drv: the driver structure to unregister - * + * * Deletes the driver structure from the list of registered PCI drivers, * gives it a chance to clean up by calling its remove() function for * each device it was responsible for, and marks those devices as @@ -1203,7 +1225,7 @@ static struct pci_driver pci_compat_driver = { * pci_dev_driver - get the pci_driver of a device * @dev: the device to query * - * Returns the appropriate pci_driver structure or %NULL if there is no + * Returns the appropriate pci_driver structure or %NULL if there is no * registered driver for the device. */ struct pci_driver * @@ -1224,7 +1246,7 @@ pci_dev_driver(const struct pci_dev *dev) * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure * @dev: the PCI device structure to match against * @drv: the device driver to search for matching PCI device id structures - * + * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching * pci_device_id structure or %NULL if there is no match. diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index edaed6f4da6c..d51f45aa669e 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -263,7 +263,7 @@ device_has_dsm(struct device *dev) acpi_handle handle; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return FALSE; @@ -295,7 +295,7 @@ acpilabel_show(struct device *dev, struct device_attribute *attr, char *buf) acpi_handle handle; int length; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return -1; @@ -316,7 +316,7 @@ acpiindex_show(struct device *dev, struct device_attribute *attr, char *buf) acpi_handle handle; int length; - handle = DEVICE_ACPI_HANDLE(dev); + handle = ACPI_HANDLE(dev); if (!handle) return -1; diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 6e47c519c510..2ff77509d8e5 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -2,13 +2,13 @@ * * Copyright (C) 2008 Red Hat, Inc. * Author: - * Chris Wright + * Chris Wright * * This work is licensed under the terms of the GNU GPL, version 2. * * Usage is simple, allocate a new id to the stub driver and bind the * device to it. 
For example: - * + * * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 2aaa83c85a4e..c91e6c18debc 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -10,7 +10,7 @@ * * File attributes for PCI devices * - * Modeled after usb's driverfs.c + * Modeled after usb's driverfs.c * */ @@ -270,13 +270,17 @@ msi_bus_store(struct device *dev, struct device_attribute *attr, if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; - /* bad things may happen if the no_msi flag is changed - * while some drivers are loaded */ + /* + * Bad things may happen if the no_msi flag is changed + * while drivers are loaded. + */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - /* Maybe pci devices without subordinate busses shouldn't even have this - * attribute in the first place? */ + /* + * Maybe devices without subordinate buses shouldn't have this + * attribute in the first place? + */ if (!pdev->subordinate) return count; @@ -670,7 +674,7 @@ pci_write_config(struct file* filp, struct kobject *kobj, size = dev->cfg_size - off; count = size; } - + pci_config_pm_runtime_get(dev); if ((off & 1) && size) { @@ -678,7 +682,7 @@ pci_write_config(struct file* filp, struct kobject *kobj, off++; size--; } - + if ((off & 3) && size > 2) { u16 val = data[off - init_off]; val |= (u16) data[off - init_off + 1] << 8; @@ -696,7 +700,7 @@ pci_write_config(struct file* filp, struct kobject *kobj, off += 4; size -= 4; } - + if (size >= 2) { u16 val = data[off - init_off]; val |= (u16) data[off - init_off + 1] << 8; @@ -1229,21 +1233,21 @@ pci_read_rom(struct file *filp, struct kobject *kobj, if (!pdev->rom_attr_enabled) return -EINVAL; - + rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */ if (!rom || !size) return -EIO; - + if (off >= size) count = 0; else { if (off + count > size) count = size - off; - + memcpy_fromio(buf, rom + off, count); } pci_unmap_rom(pdev, rom); - + return count; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b127fbda6fc8..07369f32e8bb 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -198,7 +198,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, } /** - * pci_find_capability - query for devices' capabilities + * pci_find_capability - query for devices' capabilities * @dev: PCI device to query * @cap: capability code * @@ -207,12 +207,12 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, * device's PCI configuration space or 0 in case the device does not * support it. 
Possible values for @cap: * - * %PCI_CAP_ID_PM Power Management - * %PCI_CAP_ID_AGP Accelerated Graphics Port - * %PCI_CAP_ID_VPD Vital Product Data - * %PCI_CAP_ID_SLOTID Slot Identification + * %PCI_CAP_ID_PM Power Management + * %PCI_CAP_ID_AGP Accelerated Graphics Port + * %PCI_CAP_ID_VPD Vital Product Data + * %PCI_CAP_ID_SLOTID Slot Identification * %PCI_CAP_ID_MSI Message Signalled Interrupts - * %PCI_CAP_ID_CHSWP CompactPCI HotSwap + * %PCI_CAP_ID_CHSWP CompactPCI HotSwap * %PCI_CAP_ID_PCIX PCI-X * %PCI_CAP_ID_EXP PCI Express */ @@ -228,13 +228,13 @@ int pci_find_capability(struct pci_dev *dev, int cap) } /** - * pci_bus_find_capability - query for devices' capabilities + * pci_bus_find_capability - query for devices' capabilities * @bus: the PCI bus to query * @devfn: PCI device to query * @cap: capability code * * Like pci_find_capability() but works for pci devices that do not have a - * pci_dev structure set up yet. + * pci_dev structure set up yet. * * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not @@ -515,7 +515,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) return -EINVAL; /* Validate current state: - * Can enter D0 from any state, but if we can only go deeper + * Can enter D0 from any state, but if we can only go deeper * to sleep if we're already in a low power state */ if (state != PCI_D0 && dev->current_state <= PCI_D3cold @@ -998,7 +998,7 @@ static void pci_restore_config_space(struct pci_dev *pdev) } } -/** +/** * pci_restore_state - Restore the saved state of a PCI device * @dev: - PCI device that we're dealing with */ @@ -1030,7 +1030,7 @@ struct pci_saved_state { * the device saved state. * @dev: PCI device that we're dealing with * - * Rerturn NULL if no state or error. + * Return NULL if no state or error. */ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) { @@ -1880,7 +1880,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) * pci_dev_run_wake - Check if device can generate run-time wake-up events. * @dev: Device to check. * - * Return true if the device itself is cabable of generating wake-up events + * Return true if the device itself is capable of generating wake-up events * (through the platform or using the native PCIe PME) or if the device supports * PME and one of its upstream bridges can generate wake-up events. */ @@ -2447,7 +2447,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) switch (pci_pcie_type(pdev)) { /* * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, - * but since their primary inteface is PCI/X, we conservatively + * but since their primary interface is PCI/X, we conservatively * handle them as we would a non-PCIe device. */ case PCI_EXP_TYPE_PCIE_BRIDGE: @@ -2471,7 +2471,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) /* * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be * implemented by the remaining PCIe types to indicate peer-to-peer - * capabilities, but only when they are part of a multifunciton + * capabilities, but only when they are part of a multifunction * device. The footnote for section 6.12 indicates the specific * PCIe types included here. */ @@ -2486,7 +2486,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) } /* - * PCIe 3.0, 6.12.1.3 specifies no ACS capabilties are applicable + * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable * to single function devices with the exception of downstream ports. 
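
The kerneldoc touched above for pci_find_capability() and pci_bus_find_capability() is cleaned up cosmetically only; for context, a driver typically uses the returned offset to address registers inside that capability. A minimal sketch follows, with the capability choice and the register read purely illustrative:

#include <linux/errno.h>
#include <linux/pci.h>

/* Illustrative only: locate the PCI Express capability and read DEVCTL. */
static int check_pcie_devctl(struct pci_dev *pdev)
{
	int pos;
	u16 devctl;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return -ENODEV;	/* device has no PCIe capability */

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &devctl);
	dev_dbg(&pdev->dev, "PCIe DEVCTL = %#x\n", devctl);
	return 0;
}
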
*/ return true; @@ -2622,7 +2622,7 @@ void pci_release_region(struct pci_dev *pdev, int bar) * * If @exclusive is set, then the region is marked so that userspace * is explicitly not allowed to map the resource via /dev/mem or - * sysfs MMIO access. + * sysfs MMIO access. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. @@ -2634,7 +2634,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n if (pci_resource_len(pdev, bar) == 0) return 0; - + if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { if (!request_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), res_name)) @@ -2694,7 +2694,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) * * The key difference that _exclusive makes it that userspace is * explicitly not allowed to map the resource via /dev/mem or - * sysfs. + * sysfs. */ int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) { @@ -2799,7 +2799,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) * successfully. * * pci_request_regions_exclusive() will mark the region so that - * /dev/mem and the sysfs MMIO access will not be allowed. + * /dev/mem and the sysfs MMIO access will not be allowed. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. @@ -2967,7 +2967,7 @@ pci_set_mwi(struct pci_dev *dev) cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } - + return 0; } @@ -3292,7 +3292,7 @@ clear: * * NOTE: This causes the caller to sleep for twice the device power transition * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms - * by devault (i.e. unless the @dev's d3_delay field has a different value). + * by default (i.e. unless the @dev's d3_delay field has a different value). * Moreover, only devices in D0 can be reset by this function. */ static int pci_pm_reset(struct pci_dev *dev, int probe) @@ -3341,7 +3341,7 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev) pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); /* * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double - * this to 2ms to ensure that we meet the minium requirement. + * this to 2ms to ensure that we meet the minimum requirement. */ msleep(2); @@ -3998,7 +3998,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps) return -EINVAL; v = ffs(mps) - 8; - if (v > dev->pcie_mpss) + if (v > dev->pcie_mpss) return -EINVAL; v <<= 5; @@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, return 0; } +bool pci_device_is_present(struct pci_dev *pdev) +{ + u32 v; + + return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +} +EXPORT_SYMBOL_GPL(pci_device_is_present); + #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; static DEFINE_SPINLOCK(resource_alignment_lock); diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 6b3a958e1be6..b2c8881da764 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -525,7 +525,7 @@ static void handle_error_source(struct pcie_device *aerdev, if (info->severity == AER_CORRECTABLE) { /* - * Correctable error does not need software intevention. + * Correctable error does not need software intervention. * No need to go through error recovery process. 
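
One functional change in the pci.c hunk above is the new pci_device_is_present() helper, which simply asks whether the device still answers a config-space read of its vendor ID. Callers are not shown in this section; the sketch below is only one plausible pattern (the function name is made up), for a driver that wants to skip recovery work after a surprise removal:

#include <linux/pci.h>

/* Hypothetical error handler: bail out early if the device is gone. */
static void mydrv_handle_hw_timeout(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev)) {
		dev_warn(&pdev->dev, "device no longer present, skipping reset\n");
		return;
	}

	/* Device still responds to config reads; attempt recovery here. */
	pci_reset_function(pdev);
}
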
*/ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 403a44374ed5..f1272dc54de1 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -548,7 +548,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) /* * pcie_aspm_init_link_state: Initiate PCI express link state. - * It is called after the pcie and its children devices are scaned. + * It is called after the pcie and its children devices are scanned. * @pdev: the root port or switch downstream port */ void pcie_aspm_init_link_state(struct pci_dev *pdev) diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index e56e594ce112..bbc3bdd2b189 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -419,8 +419,8 @@ static void pcie_pme_remove(struct pcie_device *srv) static struct pcie_port_service_driver pcie_pme_driver = { .name = "pcie_pme", - .port_type = PCI_EXP_TYPE_ROOT_PORT, - .service = PCIE_PORT_SERVICE_PME, + .port_type = PCI_EXP_TYPE_ROOT_PORT, + .service = PCIE_PORT_SERVICE_PME, .probe = pcie_pme_probe, .suspend = pcie_pme_suspend, diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index d2eb80aab569..d525548404d6 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -14,7 +14,7 @@ #define PCIE_PORT_DEVICE_MAXSERVICES 4 /* * According to the PCI Express Base Specification 2.0, the indices of - * the MSI-X table entires used by port services must not exceed 31 + * the MSI-X table entries used by port services must not exceed 31 */ #define PCIE_PORT_MAX_MSIX_ENTRIES 32 diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index 67be55a7f260..87e79a6ffb5a 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c @@ -18,8 +18,8 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); struct bus_type pcie_port_bus_type = { - .name = "pci_express", - .match = pcie_port_bus_match, + .name = "pci_express", + .match = pcie_port_bus_match, }; EXPORT_SYMBOL_GPL(pcie_port_bus_type); diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 08d131f7815b..0b6e76604068 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -46,7 +46,7 @@ static void release_pcie_device(struct device *dev) * pcie_port_msix_add_entry - add entry to given array of MSI-X entries * @entries: Array of MSI-X entries * @new_entry: Index of the entry to add to the array - * @nr_entries: Number of entries aleady in the array + * @nr_entries: Number of entries already in the array * * Return value: Position of the added entry in the array */ diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 696caed5fdf5..0d8fdc48e642 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -223,7 +223,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev, static void pcie_portdrv_remove(struct pci_dev *dev) { pcie_port_device_remove(dev); - pci_disable_device(dev); } static int error_detected_iter(struct device *device, void *data) @@ -390,9 +389,9 @@ static struct pci_driver pcie_portdriver = { .probe = pcie_portdrv_probe, .remove = pcie_portdrv_remove, - .err_handler = &pcie_portdrv_err_handler, + .err_handler = &pcie_portdrv_err_handler, - .driver.pm = PCIE_PORTDRV_PM_OPS, + .driver.pm = PCIE_PORTDRV_PM_OPS, }; static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) @@ -412,7 +411,7 @@ static struct 
dmi_system_id __initdata pcie_portdrv_dmi_table[] = { .ident = "MSI Wind U-100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, - "MICRO-STAR INTERNATIONAL CO., LTD"), + "MICRO-STAR INTERNATIONAL CO., LTD"), DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), }, }, diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5e14f5a51357..38e403dddf6e 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -582,7 +582,7 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat) index = 1; else goto out; - + if (agp3) { index += 2; if (index == 5) @@ -789,7 +789,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) } /* Disable MasterAbortMode during probing to avoid reporting - of bus errors (in some architectures) */ + of bus errors (in some architectures) */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); @@ -1005,7 +1005,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill * - * Initialize the device structure with information about the device's + * Initialize the device structure with information about the device's * vendor,class,memory and IO-space addresses,IRQ lines etc. * Called at initialisation of the PCI subsystem and by CardBus services. * Returns 0 on success and negative if unknown type of device (not normal, @@ -1111,7 +1111,7 @@ int pci_setup_device(struct pci_dev *dev) goto bad; /* The PCI-to-PCI bridge spec requires that subtractive decoding (i.e. transparent) bridge must have programming - interface code of 0x01. */ + interface code of 0x01. */ pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); @@ -1570,7 +1570,7 @@ static void pcie_write_mrrs(struct pci_dev *dev) * subsequent read will verify if the value is acceptable or not. * If the MRRS value provided is not acceptable (e.g., too large), * shrink the value until it is acceptable to the HW. - */ + */ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { rc = pcie_set_readrq(dev, mrrs); if (!rc) diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index cdc7836d7e3d..46d1378f2e9e 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -222,7 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, default: ret = -EINVAL; break; - }; + } return ret; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 91490453c229..3a02717473ad 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -9,10 +9,6 @@ * * Init/reset quirks for USB host controllers should be in the * USB quirks file, where their drivers can access reuse it. - * - * The bridge optimization stuff has been removed. If you really - * have a silly BIOS which is unable to set your host bridge right, - * use the PowerTweak utility (see http://powertweak.sourceforge.net). 
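
Most of the quirks.c changes that follow are whitespace and spelling cleanups around existing fixups. For readers less familiar with that machinery: a quirk is just a function run at a fixed phase for matching vendor/device IDs, registered through the DECLARE_PCI_FIXUP_* macros. A minimal sketch is below, with PCI_ANY_ID and the config register offset chosen purely for illustration:

#include <linux/pci.h>

/* Illustrative fixup: set one bit in a (made-up) config register at 0x40. */
static void quirk_example_enable_bit(struct pci_dev *dev)
{
	u8 val;

	pci_read_config_byte(dev, 0x40, &val);
	if (!(val & 0x01)) {
		pci_write_config_byte(dev, 0x40, val | 0x01);
		dev_info(&dev->dev, "example quirk applied\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_example_enable_bit);
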
*/ #include <linux/types.h> @@ -55,7 +51,7 @@ static void quirk_mellanox_tavor(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); -/* Deal with broken BIOS'es that neglect to enable passive release, +/* Deal with broken BIOSes that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) { @@ -78,11 +74,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p /* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround but VIA don't answer queries. If you happen to have good contacts at VIA - ask them for me please -- Alan - - This appears to be BIOS not version dependent. So presumably there is a + ask them for me please -- Alan + + This appears to be BIOS not version dependent. So presumably there is a chipset level fix */ - + static void quirk_isa_dma_hangs(struct pci_dev *dev) { if (!isa_dma_bridge_buggy) { @@ -97,7 +93,7 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); @@ -157,10 +153,10 @@ static void quirk_triton(struct pci_dev *dev) pci_pci_problems |= PCIPCI_TRITON; } } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); /* * VIA Apollo KT133 needs PCI latency patch @@ -171,7 +167,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir * the info on which Mr Breese based his work. * * Updated based on further information from the site and also on - * information provided by VIA + * information provided by VIA */ static void quirk_vialatency(struct pci_dev *dev) { @@ -179,7 +175,7 @@ static void quirk_vialatency(struct pci_dev *dev) u8 busarb; /* Ok we have a potential problem chipset here. 
Now see if we have a buggy southbridge */ - + p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); if (p!=NULL) { /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ @@ -194,9 +190,9 @@ static void quirk_vialatency(struct pci_dev *dev) if (p->revision < 0x10 || p->revision > 0x12) goto exit; } - + /* - * Ok we have the problem. Now set the PCI master grant to + * Ok we have the problem. Now set the PCI master grant to * occur every master grant. The apparent bug is that under high * PCI load (quite common in Linux of course) you can get data * loss when the CPU is held off the bus for 3 bus master requests @@ -209,7 +205,7 @@ static void quirk_vialatency(struct pci_dev *dev) */ pci_read_config_byte(dev, 0x76, &busarb); - /* Set bit 4 and bi 5 of byte 76 to 0x01 + /* Set bit 4 and bi 5 of byte 76 to 0x01 "Master priority rotation on every PCI master grant */ busarb &= ~(1<<5); busarb |= (1<<4); @@ -252,7 +248,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx) * that DMA to AGP space. Latency must be set to 0xA and triton * workaround applied too * [Info kindly provided by ALi] - */ + */ static void quirk_alimagik(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) { @@ -260,8 +256,8 @@ static void quirk_alimagik(struct pci_dev *dev) pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; } } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); /* * Natoma has some interesting boundary conditions with Zoran stuff @@ -274,12 +270,12 @@ static void quirk_natoma(struct pci_dev *dev) pci_pci_problems |= PCIPCI_NATOMA; } } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); /* * This chip can cause PCI parity errors if config register 0xA0 is read @@ -400,7 +396,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p /* * For now we only print it out. Eventually we'll want to * reserve it (at least if it's in the 0x1000+ range), but - * let's get enough confirmation reports first. + * let's get enough confirmation reports first. 
*/ base &= -size; dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1); @@ -425,7 +421,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int } /* * For now we only print it out. Eventually we'll want to - * reserve it, but let's get enough confirmation reports first. + * reserve it, but let's get enough confirmation reports first. */ base &= -size; dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1); @@ -682,7 +678,7 @@ static void quirk_xio2000a(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, quirk_xio2000a); -#ifdef CONFIG_X86_IO_APIC +#ifdef CONFIG_X86_IO_APIC #include <asm/io_apic.h> @@ -696,12 +692,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, static void quirk_via_ioapic(struct pci_dev *dev) { u8 tmp; - + if (nr_ioapics < 1) tmp = 0; /* nothing routed to external APIC */ else tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ - + dev_info(&dev->dev, "%sbling VIA external APIC routing\n", tmp == 0 ? "Disa" : "Ena"); @@ -712,7 +708,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_i DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* - * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. + * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit. * This leads to doubled level interrupt rates. * Set this bit to get rid of cycle wastage. * Otherwise uncritical. @@ -986,7 +982,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu static void quirk_disable_pxb(struct pci_dev *pdev) { u16 config; - + if (pdev->revision != 0x04) /* Only C0 requires this */ return; pci_read_config_word(pdev, 0x40, &config); @@ -1094,11 +1090,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge * is not activated. The myth is that Asus said that they do not want the * users to be irritated by just another PCI Device in the Win98 device - * manager. (see the file prog/hotplug/README.p4b in the lm_sensors + * manager. (see the file prog/hotplug/README.p4b in the lm_sensors * package 2.7.0 for details) * - * The SMBus PCI Device can be activated by setting a bit in the ICH LPC - * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it + * The SMBus PCI Device can be activated by setting a bit in the ICH LPC + * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it * becomes necessary to do this tweak in two steps -- the chosen trigger * is either the Host bridge (preferred) or on-board VGA controller. 
* @@ -1253,7 +1249,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asu static void asus_hides_smbus_lpc(struct pci_dev *dev) { u16 val; - + if (likely(!asus_hides_smbus)) return; @@ -1640,8 +1636,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); /* * disable boot interrupts on HT-1000 @@ -1673,8 +1669,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); /* * disable boot interrupts on AMD and ATI chipsets @@ -1730,8 +1726,8 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); #endif /* CONFIG_X86_IO_APIC */ /* @@ -2127,8 +2123,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); #ifdef CONFIG_PCI_MSI /* Some chipsets do not support MSI. We cannot easily rely on setting * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually - * some other busses controlled by the chipset even if Linux is not - * aware of it. Instead of setting the flag on all busses in the + * some other buses controlled by the chipset even if Linux is not + * aware of it. Instead of setting the flag on all buses in the * machine, simply disable MSI globally. */ static void quirk_disable_all_msi(struct pci_dev *dev) @@ -2288,14 +2284,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, nvenet_msi_disable); /* - * Some versions of the MCP55 bridge from nvidia have a legacy irq routing - * config register. This register controls the routing of legacy interrupts - * from devices that route through the MCP55. If this register is misprogramed - * interrupts are only sent to the bsp, unlike conventional systems where the - * irq is broadxast to all online cpus. Not having this register set - * properly prevents kdump from booting up properly, so lets make sure that - * we have it set correctly. 
- * Note this is an undocumented register. + * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing + * config register. This register controls the routing of legacy + * interrupts from devices that route through the MCP55. If this register + * is misprogrammed, interrupts are only sent to the BSP, unlike + * conventional systems where the IRQ is broadcast to all online CPUs. Not + * having this register set properly prevents kdump from booting up + * properly, so let's make sure that we have it set correctly. + * Note that this is an undocumented register. */ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) { @@ -2626,7 +2622,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, /* Allow manual resource allocation for PCI hotplug bridges * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), - * kernel fails to allocate resources when hotplug device is + * kernel fails to allocate resources when hotplug device is * inserted and PCI bus is rescanned. */ static void quirk_hotplug_bridge(struct pci_dev *dev) diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 8fc54b7327bc..cc9337a71529 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -7,7 +7,7 @@ static void pci_free_resources(struct pci_dev *dev) { int i; - msi_remove_pci_irq_vectors(dev); + msi_remove_pci_irq_vectors(dev); pci_cleanup_rom(dev); for (i = 0; i < PCI_NUM_RESOURCES; i++) { @@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev) if (dev->is_added) { pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - device_del(&dev->dev); + device_release_driver(&dev->dev); dev->is_added = 0; } @@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev) static void pci_destroy_dev(struct pci_dev *dev) { + device_del(&dev->dev); + down_write(&pci_bus_sem); list_del(&dev->bus_list); up_write(&pci_bus_sem); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index d0627fa9f368..3ff2ac7c14e2 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -1,5 +1,5 @@ /* - * PCI searching functions. + * PCI searching functions. * * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter, * David Mosberger-Tang @@ -96,12 +96,12 @@ struct pci_bus * pci_find_bus(int domain, int busnr) * pci_find_next_bus - begin or continue searching for a PCI bus * @from: Previous PCI bus found, or %NULL for new search. * - * Iterates through the list of known PCI busses. A new search is + * Iterates through the list of known PCI buses. A new search is * initiated by passing %NULL as the @from argument. Otherwise if * @from is not %NULL, searches continue from next device on the * global list. */ -struct pci_bus * +struct pci_bus * pci_find_next_bus(const struct pci_bus *from) { struct list_head *n; @@ -119,11 +119,11 @@ pci_find_next_bus(const struct pci_bus *from) /** * pci_get_slot - locate PCI device for a given PCI slot * @bus: PCI bus on which desired PCI device resides - * @devfn: encodes number of PCI slot in which the desired PCI - * device resides and the logical device number within that slot + * @devfn: encodes number of PCI slot in which the desired PCI + * device resides and the logical device number within that slot * in case of multi-function devices. * - * Given a PCI bus and slot/function number, the desired PCI device + * Given a PCI bus and slot/function number, the desired PCI device * is located in the list of PCI devices. 
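
The search.c hunks here and just below only touch comments, but the reference-counting rule they describe is worth restating: the pci_get_*() lookups return a device with an elevated refcount, which the caller must eventually drop with pci_dev_put(). A small sketch of the usual iteration idiom, with the vendor ID chosen arbitrarily:

#include <linux/kernel.h>
#include <linux/pci.h>

/* Walk every Intel PCI device in the system; purely illustrative. */
static void count_intel_devices(void)
{
	struct pci_dev *pdev = NULL;
	int n = 0;

	/*
	 * pci_get_device() drops the reference on the device passed in as
	 * 'from' and takes one on the device it returns, so this loop is
	 * refcount-correct without an explicit pci_dev_put() unless you
	 * break out early while still holding a device.
	 */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev)))
		n++;

	pr_info("found %d Intel PCI devices\n", n);
}
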
* If the device is found, its reference count is increased and this * function returns a pointer to its data structure. The caller must diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4ce83b26ae9e..219a4106480a 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -292,8 +292,8 @@ static void assign_requested_resources_sorted(struct list_head *head, (!(res->flags & IORESOURCE_ROM_ENABLE)))) add_to_list(fail_head, dev_res->dev, res, - 0 /* dont care */, - 0 /* dont care */); + 0 /* don't care */, + 0 /* don't care */); } reset_resource(res); } @@ -667,9 +667,9 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) if (!io) { pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) b_res[0].flags |= IORESOURCE_IO; /* DECchip 21050 pass 2 errata: the bridge may miss an address disconnect boundary by one PCI data phase. @@ -819,7 +819,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, resource_size_t min_align, align; if (!b_res) - return; + return; min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { @@ -950,7 +950,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (realloc_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { r->end = r->start - 1; - add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */); + add_to_list(realloc_head, dev, r, r_size, 0/* don't care */); children_add_size += r_size; continue; } @@ -1456,8 +1456,8 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus, /* * first try will not touch pci bridge res - * second and later try will clear small leaf bridge res - * will stop till to the max deepth if can not find good one + * second and later try will clear small leaf bridge res + * will stop till to the max depth if can not find good one */ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) { diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 07f2eddc09ce..83c4d3bc47ab 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -159,7 +159,7 @@ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx) return 0; } -static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, +static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, int resno, resource_size_t size) { struct resource *root, *conflict; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index c1e9284a677b..448ca562d1f8 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -53,7 +53,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) static const char *pci_bus_speed_strings[] = { "33 MHz PCI", /* 0x00 */ "66 MHz PCI", /* 0x01 */ - "66 MHz PCI-X", /* 0x02 */ + "66 MHz PCI-X", /* 0x02 */ "100 MHz PCI-X", /* 0x03 */ "133 MHz PCI-X", /* 0x04 */ NULL, /* 0x05 */ diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index e1c1ec540893..24750a1b39b6 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -44,7 +44,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, default: err = -EINVAL; goto error; - }; + } err = -EIO; if (cfg_ret != PCIBIOS_SUCCESSFUL) diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c index 4780959e11d4..5183e7bb8de3 100644 --- a/drivers/pinctrl/pinctrl-abx500.c +++ 
b/drivers/pinctrl/pinctrl-abx500.c @@ -418,7 +418,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, af.alt_bit1, - !!(af.alta_val && BIT(0))); + !!(af.alta_val & BIT(0))); if (ret < 0) goto out; @@ -439,7 +439,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, goto out; ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, - af.alt_bit1, !!(af.altb_val && BIT(0))); + af.alt_bit1, !!(af.altb_val & BIT(0))); if (ret < 0) goto out; @@ -462,7 +462,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, goto out; ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG, - af.alt_bit2, !!(af.altc_val && BIT(1))); + af.alt_bit2, !!(af.altc_val & BIT(1))); break; default: diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h index eeca8f973999..82293806e842 100644 --- a/drivers/pinctrl/pinctrl-abx500.h +++ b/drivers/pinctrl/pinctrl-abx500.h @@ -1,4 +1,4 @@ -#ifndef PINCTRL_PINCTRL_ABx5O0_H +#ifndef PINCTRL_PINCTRL_ABx500_H #define PINCTRL_PINCTRL_ABx500_H /* Package definitions */ diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index e939c28cbf1f..46dddc159286 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -504,6 +504,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, data |= (3 << bit); break; default: + spin_unlock_irqrestore(&bank->slock, flags); dev_err(info->dev, "unsupported pull setting %d\n", pull); return -EINVAL; @@ -1453,8 +1454,8 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) if (ctrl->type == RK3188) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); info->reg_pull = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(info->reg_base)) - return PTR_ERR(info->reg_base); + if (IS_ERR(info->reg_pull)) + return PTR_ERR(info->reg_pull); } ret = rockchip_gpiolib_register(pdev, info); diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c index 009174d07767..bc5eb453a45c 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c @@ -3720,7 +3720,7 @@ static void __iomem *r8a7740_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin) const struct r8a7740_portcr_group *group = &r8a7740_portcr_offsets[i]; - if (i <= group->end_pin) + if (pin <= group->end_pin) return pfc->window->virt + group->offset + pin; } diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c index 70b522d34821..cc097b693820 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c @@ -2584,7 +2584,7 @@ static void __iomem *sh7372_pinmux_portcr(struct sh_pfc *pfc, unsigned int pin) const struct sh7372_portcr_group *group = &sh7372_portcr_offsets[i]; - if (i <= group->end_pin) + if (pin <= group->end_pin) return pfc->window->virt + group->offset + pin; } diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 69616aeaa966..09fde58b12e0 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -5,3 +5,4 @@ if GOLDFISH source "drivers/platform/goldfish/Kconfig" endif +source "drivers/platform/chrome/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 8a44a4cd6d1e..3656b7b17b99 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_X86) += x86/ obj-$(CONFIG_OLPC) += olpc/ obj-$(CONFIG_GOLDFISH) += 
goldfish/ +obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig new file mode 100644 index 000000000000..b13303e75a34 --- /dev/null +++ b/drivers/platform/chrome/Kconfig @@ -0,0 +1,28 @@ +# +# Platform support for Chrome OS hardware (Chromebooks and Chromeboxes) +# + +menuconfig CHROME_PLATFORMS + bool "Platform support for Chrome hardware" + depends on X86 + ---help--- + Say Y here to get to see options for platform support for + various Chromebooks and Chromeboxes. This option alone does + not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if CHROME_PLATFORMS + +config CHROMEOS_LAPTOP + tristate "Chrome OS Laptop" + depends on I2C + depends on DMI + ---help--- + This driver instantiates i2c and smbus devices such as + light sensors and touchpads. + + If you have a supported Chromebook, choose Y or M here. + The module will be called chromeos_laptop. + +endif # CHROMEOS_PLATFORMS diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile new file mode 100644 index 000000000000..015e9195e226 --- /dev/null +++ b/drivers/platform/chrome/Makefile @@ -0,0 +1,2 @@ + +obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 3e5b4497a1d0..3e5b4497a1d0 100644 --- a/drivers/platform/x86/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index b51a7460cc49..d9dcd37b5a52 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -79,17 +79,6 @@ config ASUS_LAPTOP If you have an ACPI-compatible ASUS laptop, say Y or M here. -config CHROMEOS_LAPTOP - tristate "Chrome OS Laptop" - depends on I2C - depends on DMI - ---help--- - This driver instantiates i2c and smbus devices such as - light sensors and touchpads. - - If you have a supported Chromebook, choose Y or M here. - The module will be called chromeos_laptop. 
- config DELL_LAPTOP tristate "Dell Laptop Extras" depends on X86 diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 5dbe19324351..f0e6aa407ffb 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -50,7 +50,6 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o -obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_INTEL_RST) += intel-rst.o obj-$(CONFIG_INTEL_SMARTCONNECT) += intel-smartconnect.o diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 605a9be55129..b9429fbf1cd8 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -519,7 +519,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) gmux_data->power_state = VGA_SWITCHEROO_ON; - gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); + gmux_data->dhandle = ACPI_HANDLE(&pnp->dev); if (!gmux_data->dhandle) { pr_err("Cannot find acpi handle for pnp device %s\n", dev_name(&pnp->dev)); diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 0e9c169b42f8..594323a926cf 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -1494,10 +1494,9 @@ static int asus_input_init(struct asus_laptop *asus) int error; input = input_allocate_device(); - if (!input) { - pr_warn("Unable to allocate input device\n"); + if (!input) return -ENOMEM; - } + input->name = "Asus Laptop extra buttons"; input->phys = ASUS_LAPTOP_FILE "/input0"; input->id.bustype = BUS_HOST; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index bb77e18b3dd4..c608b1d33f4a 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -21,6 +21,7 @@ #include <linux/err.h> #include <linux/dmi.h> #include <linux/io.h> +#include <linux/rfkill.h> #include <linux/power_supply.h> #include <linux/acpi.h> #include <linux/mm.h> @@ -89,6 +90,13 @@ static struct platform_driver platform_driver = { static struct platform_device *platform_device; static struct backlight_device *dell_backlight_device; +static struct rfkill *wifi_rfkill; +static struct rfkill *bluetooth_rfkill; +static struct rfkill *wwan_rfkill; +static bool force_rfkill; + +module_param(force_rfkill, bool, 0444); +MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models"); static const struct dmi_system_id dell_device_table[] __initconst = { { @@ -355,6 +363,108 @@ dell_send_request(struct calling_interface_buffer *buffer, int class, return buffer; } +/* Derived from information in DellWirelessCtl.cpp: + Class 17, select 11 is radio control. It returns an array of 32-bit values. 
+ + Input byte 0 = 0: Wireless information + + result[0]: return code + result[1]: + Bit 0: Hardware switch supported + Bit 1: Wifi locator supported + Bit 2: Wifi is supported + Bit 3: Bluetooth is supported + Bit 4: WWAN is supported + Bit 5: Wireless keyboard supported + Bits 6-7: Reserved + Bit 8: Wifi is installed + Bit 9: Bluetooth is installed + Bit 10: WWAN is installed + Bits 11-15: Reserved + Bit 16: Hardware switch is on + Bit 17: Wifi is blocked + Bit 18: Bluetooth is blocked + Bit 19: WWAN is blocked + Bits 20-31: Reserved + result[2]: NVRAM size in bytes + result[3]: NVRAM format version number + + Input byte 0 = 2: Wireless switch configuration + result[0]: return code + result[1]: + Bit 0: Wifi controlled by switch + Bit 1: Bluetooth controlled by switch + Bit 2: WWAN controlled by switch + Bits 3-6: Reserved + Bit 7: Wireless switch config locked + Bit 8: Wifi locator enabled + Bits 9-14: Reserved + Bit 15: Wifi locator setting locked + Bits 16-31: Reserved +*/ + +static int dell_rfkill_set(void *data, bool blocked) +{ + int disable = blocked ? 1 : 0; + unsigned long radio = (unsigned long)data; + int hwswitch_bit = (unsigned long)data - 1; + + get_buffer(); + dell_send_request(buffer, 17, 11); + + /* If the hardware switch controls this radio, and the hardware + switch is disabled, always disable the radio */ + if ((hwswitch_state & BIT(hwswitch_bit)) && + !(buffer->output[1] & BIT(16))) + disable = 1; + + buffer->input[0] = (1 | (radio<<8) | (disable << 16)); + dell_send_request(buffer, 17, 11); + + release_buffer(); + return 0; +} + +/* Must be called with the buffer held */ +static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, + int status) +{ + if (status & BIT(0)) { + /* Has hw-switch, sync sw_state to BIOS */ + int block = rfkill_blocked(rfkill); + buffer->input[0] = (1 | (radio << 8) | (block << 16)); + dell_send_request(buffer, 17, 11); + } else { + /* No hw-switch, sync BIOS state to sw_state */ + rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16))); + } +} + +static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, + int status) +{ + if (hwswitch_state & (BIT(radio - 1))) + rfkill_set_hw_state(rfkill, !(status & BIT(16))); +} + +static void dell_rfkill_query(struct rfkill *rfkill, void *data) +{ + int status; + + get_buffer(); + dell_send_request(buffer, 17, 11); + status = buffer->output[1]; + + dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); + + release_buffer(); +} + +static const struct rfkill_ops dell_rfkill_ops = { + .set_block = dell_rfkill_set, + .query = dell_rfkill_query, +}; + static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) @@ -424,6 +534,136 @@ static const struct file_operations dell_debugfs_fops = { .release = single_release, }; +static void dell_update_rfkill(struct work_struct *ignored) +{ + int status; + + get_buffer(); + dell_send_request(buffer, 17, 11); + status = buffer->output[1]; + + if (wifi_rfkill) { + dell_rfkill_update_hw_state(wifi_rfkill, 1, status); + dell_rfkill_update_sw_state(wifi_rfkill, 1, status); + } + if (bluetooth_rfkill) { + dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status); + dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); + } + if (wwan_rfkill) { + dell_rfkill_update_hw_state(wwan_rfkill, 3, status); + dell_rfkill_update_sw_state(wwan_rfkill, 3, status); + } + + release_buffer(); +} +static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); + + +static int __init 
dell_setup_rfkill(void) +{ + int status; + int ret; + const char *product; + + /* + * rfkill causes trouble on various non Latitudes, according to Dell + * actually testing the rfkill functionality is only done on Latitudes. + */ + product = dmi_get_system_info(DMI_PRODUCT_NAME); + if (!force_rfkill && (!product || strncmp(product, "Latitude", 8))) + return 0; + + get_buffer(); + dell_send_request(buffer, 17, 11); + status = buffer->output[1]; + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + hwswitch_state = buffer->output[1]; + release_buffer(); + + if (!(status & BIT(0))) { + if (force_rfkill) { + /* No hwsitch, clear all hw-controlled bits */ + hwswitch_state &= ~7; + } else { + /* rfkill is only tested on laptops with a hwswitch */ + return 0; + } + } + + if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { + wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, + RFKILL_TYPE_WLAN, + &dell_rfkill_ops, (void *) 1); + if (!wifi_rfkill) { + ret = -ENOMEM; + goto err_wifi; + } + ret = rfkill_register(wifi_rfkill); + if (ret) + goto err_wifi; + } + + if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) { + bluetooth_rfkill = rfkill_alloc("dell-bluetooth", + &platform_device->dev, + RFKILL_TYPE_BLUETOOTH, + &dell_rfkill_ops, (void *) 2); + if (!bluetooth_rfkill) { + ret = -ENOMEM; + goto err_bluetooth; + } + ret = rfkill_register(bluetooth_rfkill); + if (ret) + goto err_bluetooth; + } + + if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) { + wwan_rfkill = rfkill_alloc("dell-wwan", + &platform_device->dev, + RFKILL_TYPE_WWAN, + &dell_rfkill_ops, (void *) 3); + if (!wwan_rfkill) { + ret = -ENOMEM; + goto err_wwan; + } + ret = rfkill_register(wwan_rfkill); + if (ret) + goto err_wwan; + } + + return 0; +err_wwan: + rfkill_destroy(wwan_rfkill); + if (bluetooth_rfkill) + rfkill_unregister(bluetooth_rfkill); +err_bluetooth: + rfkill_destroy(bluetooth_rfkill); + if (wifi_rfkill) + rfkill_unregister(wifi_rfkill); +err_wifi: + rfkill_destroy(wifi_rfkill); + + return ret; +} + +static void dell_cleanup_rfkill(void) +{ + if (wifi_rfkill) { + rfkill_unregister(wifi_rfkill); + rfkill_destroy(wifi_rfkill); + } + if (bluetooth_rfkill) { + rfkill_unregister(bluetooth_rfkill); + rfkill_destroy(bluetooth_rfkill); + } + if (wwan_rfkill) { + rfkill_unregister(wwan_rfkill); + rfkill_destroy(wwan_rfkill); + } +} + static int dell_send_intensity(struct backlight_device *bd) { int ret = 0; @@ -515,6 +755,30 @@ static void touchpad_led_exit(void) led_classdev_unregister(&touchpad_led); } +static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, + struct serio *port) +{ + static bool extended; + + if (str & 0x20) + return false; + + if (unlikely(data == 0xe0)) { + extended = true; + return false; + } else if (unlikely(extended)) { + switch (data) { + case 0x8: + schedule_delayed_work(&dell_rfkill_work, + round_jiffies_relative(HZ / 4)); + break; + } + extended = false; + } + + return false; +} + static int __init dell_init(void) { int max_intensity = 0; @@ -557,10 +821,26 @@ static int __init dell_init(void) } buffer = page_address(bufferpage); + ret = dell_setup_rfkill(); + + if (ret) { + pr_warn("Unable to setup rfkill\n"); + goto fail_rfkill; + } + + ret = i8042_install_filter(dell_laptop_i8042_filter); + if (ret) { + pr_warn("Unable to install key filter\n"); + goto fail_filter; + } + if (quirks && quirks->touchpad_led) touchpad_led_init(&platform_device->dev); dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); + if (dell_laptop_dir != NULL) + debugfs_create_file("rfkill", 0444, 
dell_laptop_dir, NULL, + &dell_debugfs_fops); #ifdef CONFIG_ACPI /* In the event of an ACPI backlight being available, don't @@ -603,6 +883,11 @@ static int __init dell_init(void) return 0; fail_backlight: + i8042_remove_filter(dell_laptop_i8042_filter); + cancel_delayed_work_sync(&dell_rfkill_work); +fail_filter: + dell_cleanup_rfkill(); +fail_rfkill: free_page((unsigned long)bufferpage); fail_buffer: platform_device_del(platform_device); @@ -620,7 +905,10 @@ static void __exit dell_exit(void) debugfs_remove_recursive(dell_laptop_dir); if (quirks && quirks->touchpad_led) touchpad_led_exit(); + i8042_remove_filter(dell_laptop_i8042_filter); + cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); + dell_cleanup_rfkill(); if (platform_device) { platform_device_unregister(platform_device); platform_driver_unregister(&platform_driver); diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index fa9a2171cc13..60e0900bc117 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -130,7 +130,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = { KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE, KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2, - KEY_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_MICMUTE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -139,8 +140,8 @@ static const u16 bios_to_linux_keycode[256] __initconst = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - KEY_PROG3 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_PROG3 }; static struct input_dev *dell_wmi_input_dev; diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index aefcc32e5634..dec68e7a99c7 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -1203,10 +1203,8 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc) int error; input = input_allocate_device(); - if (!input) { - pr_info("Unable to allocate input device\n"); + if (!input) return -ENOMEM; - } input->name = "Asus EeePC extra buttons"; input->phys = EEEPC_LAPTOP_FILE "/input0"; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 1c86fa0857c8..8ba8956b5a48 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -54,6 +54,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_HARDWARE_QUERY 0x4 #define HPWMI_WIRELESS_QUERY 0x5 #define HPWMI_HOTKEY_QUERY 0xc +#define HPWMI_FEATURE_QUERY 0xd #define HPWMI_WIRELESS2_QUERY 0x1b #define HPWMI_POSTCODEERROR_QUERY 0x2a @@ -292,6 +293,17 @@ static int hp_wmi_tablet_state(void) return (state & 0x4) ? 1 : 0; } +static int hp_wmi_bios_2009_later(void) +{ + int state = 0; + int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, + sizeof(state), sizeof(state)); + if (ret) + return ret; + + return (state & 0x10) ? 
1 : 0; +} + static int hp_wmi_set_block(void *data, bool blocked) { enum hp_wmi_radio r = (enum hp_wmi_radio) data; @@ -871,7 +883,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) gps_rfkill = NULL; rfkill2_count = 0; - if (hp_wmi_rfkill_setup(device)) + if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device)) hp_wmi_rfkill2_setup(device); err = device_create_file(&device->dev, &dev_attr_display); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 6788acc22ab9..19ec95147f69 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -570,10 +570,8 @@ static int ideapad_input_init(struct ideapad_private *priv) int error; inputdev = input_allocate_device(); - if (!inputdev) { - pr_info("Unable to allocate input device\n"); + if (!inputdev) return -ENOMEM; - } inputdev->name = "Ideapad extra buttons"; inputdev->phys = "ideapad/input0"; diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 6b18aba82cfa..8d6775266d66 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -66,10 +66,8 @@ static int mfld_pb_probe(struct platform_device *pdev) return -EINVAL; input = input_allocate_device(); - if (!input) { - dev_err(&pdev->dev, "Input device allocation error\n"); + if (!input) return -ENOMEM; - } input->name = pdev->name; input->phys = "power-button/input0"; diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index d654f831410d..60ea476a9130 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -58,12 +58,56 @@ * message handler is called within firmware. */ -#define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */ -#define IPC_MAX_ADDR 0x100 /* Maximum IPC regisers */ #define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */ #define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ -#define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */ -#define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C regisers */ +#define IPC_IOC 0x100 /* IPC command register IOC bit */ + +enum { + SCU_IPC_LINCROFT, + SCU_IPC_PENWELL, + SCU_IPC_CLOVERVIEW, + SCU_IPC_TANGIER, +}; + +/* intel scu ipc driver data*/ +struct intel_scu_ipc_pdata_t { + u32 ipc_base; + u32 i2c_base; + u32 ipc_len; + u32 i2c_len; + u8 irq_mode; +}; + +static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = { + [SCU_IPC_LINCROFT] = { + .ipc_base = 0xff11c000, + .i2c_base = 0xff12b000, + .ipc_len = 0x100, + .i2c_len = 0x10, + .irq_mode = 0, + }, + [SCU_IPC_PENWELL] = { + .ipc_base = 0xff11c000, + .i2c_base = 0xff12b000, + .ipc_len = 0x100, + .i2c_len = 0x10, + .irq_mode = 1, + }, + [SCU_IPC_CLOVERVIEW] = { + .ipc_base = 0xff11c000, + .i2c_base = 0xff12b000, + .ipc_len = 0x100, + .i2c_len = 0x10, + .irq_mode = 1, + }, + [SCU_IPC_TANGIER] = { + .ipc_base = 0xff009000, + .i2c_base = 0xff00d000, + .ipc_len = 0x100, + .i2c_len = 0x10, + .irq_mode = 0, + }, +}; static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id); static void ipc_remove(struct pci_dev *pdev); @@ -72,6 +116,8 @@ struct intel_scu_ipc_dev { struct pci_dev *pdev; void __iomem *ipc_base; void __iomem *i2c_base; + struct completion cmd_complete; + u8 irq_mode; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ @@ -98,6 +144,10 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ */ static inline void ipc_command(u32 cmd) /* Send ipc 
command */ { + if (ipcdev.irq_mode) { + reinit_completion(&ipcdev.cmd_complete); + writel(cmd | IPC_IOC, ipcdev.ipc_base); + } writel(cmd, ipcdev.ipc_base); } @@ -156,6 +206,30 @@ static inline int busy_loop(void) /* Wait till scu status is busy */ return 0; } +/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ +static inline int ipc_wait_for_interrupt(void) +{ + int status; + + if (!wait_for_completion_timeout(&ipcdev.cmd_complete, 3 * HZ)) { + struct device *dev = &ipcdev.pdev->dev; + dev_err(dev, "IPC timed out\n"); + return -ETIMEDOUT; + } + + status = ipc_read_status(); + + if ((status >> 1) & 1) + return -EIO; + + return 0; +} + +int intel_scu_ipc_check_status(void) +{ + return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop(); +} + /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) { @@ -196,8 +270,8 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) ipc_command(4 << 16 | id << 12 | 0 << 8 | op); } - err = busy_loop(); - if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */ + err = intel_scu_ipc_check_status(); + if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */ /* Workaround: values are read as 0 without memcpy_fromio */ memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16); for (nc = 0; nc < count; nc++) @@ -391,7 +465,7 @@ int intel_scu_ipc_simple_command(int cmd, int sub) return -ENODEV; } ipc_command(sub << 12 | cmd); - err = busy_loop(); + err = intel_scu_ipc_check_status(); mutex_unlock(&ipclock); return err; } @@ -425,10 +499,12 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, ipc_data_writel(*in++, 4 * i); ipc_command((inlen << 16) | (sub << 12) | cmd); - err = busy_loop(); + err = intel_scu_ipc_check_status(); - for (i = 0; i < outlen; i++) - *out++ = ipc_data_readl(4 * i); + if (!err) { + for (i = 0; i < outlen; i++) + *out++ = ipc_data_readl(4 * i); + } mutex_unlock(&ipclock); return err; @@ -491,6 +567,9 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); */ static irqreturn_t ioc(int irq, void *dev_id) { + if (ipcdev.irq_mode) + complete(&ipcdev.cmd_complete); + return IRQ_HANDLED; } @@ -504,13 +583,18 @@ static irqreturn_t ioc(int irq, void *dev_id) */ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) { - int err; + int err, pid; + struct intel_scu_ipc_pdata_t *pdata; resource_size_t pci_resource; if (ipcdev.pdev) /* We support only one SCU */ return -EBUSY; + pid = id->driver_data; + pdata = &intel_scu_ipc_pdata[pid]; + ipcdev.pdev = pci_dev_get(dev); + ipcdev.irq_mode = pdata->irq_mode; err = pci_enable_device(dev); if (err) @@ -524,14 +608,16 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) if (!pci_resource) return -ENOMEM; + init_completion(&ipcdev.cmd_complete); + if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) return -EBUSY; - ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR); + ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len); if (!ipcdev.ipc_base) return -ENOMEM; - ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR); + ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len); if (!ipcdev.i2c_base) { iounmap(ipcdev.ipc_base); return -ENOMEM; @@ -564,7 +650,10 @@ static void ipc_remove(struct pci_dev *pdev) } static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, + {PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT}, + {PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL}, + 
{PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW}, + {PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER}, { 0,} }; MODULE_DEVICE_TABLE(pci, pci_ids); diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 10d12b221601..3008fd20572e 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -490,11 +490,8 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc) int error; input_dev = input_allocate_device(); - if (!input_dev) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Couldn't allocate input device for hotkey")); + if (!input_dev) return -ENOMEM; - } input_dev->name = ACPI_PCC_DRIVER_NAME; input_dev->phys = ACPI_PCC_INPUT_PHYS; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 47caab0ea7a1..fb233ae7bb0e 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -140,12 +140,12 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "on the model (default: no change from current value)"); #ifdef CONFIG_PM_SLEEP -static void sony_nc_kbd_backlight_resume(void); static void sony_nc_thermal_resume(void); #endif static int sony_nc_kbd_backlight_setup(struct platform_device *pd, unsigned int handle); -static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); +static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd, + unsigned int handle); static int sony_nc_battery_care_setup(struct platform_device *pd, unsigned int handle); @@ -304,8 +304,8 @@ static int sony_laptop_input_keycode_map[] = { KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */ KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */ KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */ - KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */ - KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */ + KEY_FN_1, /* 17 SONYPI_EVENT_FNKEY_1 */ + KEY_FN_2, /* 18 SONYPI_EVENT_FNKEY_2 */ KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */ KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */ KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */ @@ -1444,7 +1444,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) case 0x014b: case 0x014c: case 0x0163: - sony_nc_kbd_backlight_cleanup(pd); + sony_nc_kbd_backlight_cleanup(pd, handle); break; default: continue; @@ -1486,13 +1486,6 @@ static void sony_nc_function_resume(void) case 0x0135: sony_nc_rfkill_update(); break; - case 0x0137: - case 0x0143: - case 0x014b: - case 0x014c: - case 0x0163: - sony_nc_kbd_backlight_resume(); - break; default: continue; } @@ -1822,6 +1815,12 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd, int result; int ret = 0; + if (kbdbl_ctl) { + pr_warn("handle 0x%.4x: keyboard backlight setup already done for 0x%.4x\n", + handle, kbdbl_ctl->handle); + return -EBUSY; + } + /* verify the kbd backlight presence, these handles are not used for * keyboard backlight only */ @@ -1881,9 +1880,10 @@ outkzalloc: return ret; } -static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) +static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd, + unsigned int handle) { - if (kbdbl_ctl) { + if (kbdbl_ctl && handle == kbdbl_ctl->handle) { device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); kfree(kbdbl_ctl); @@ -1891,25 +1891,6 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) } } -#ifdef CONFIG_PM_SLEEP -static void sony_nc_kbd_backlight_resume(void) -{ - int ignore = 0; - - if (!kbdbl_ctl) - return; - - if (kbdbl_ctl->mode == 0) - sony_call_snc_handle(kbdbl_ctl->handle, 
kbdbl_ctl->base, - &ignore); - - if (kbdbl_ctl->timeout != 0) - sony_call_snc_handle(kbdbl_ctl->handle, - (kbdbl_ctl->base + 0x200) | - (kbdbl_ctl->timeout << 0x10), &ignore); -} -#endif - struct battery_care_control { struct device_attribute attrs[2]; unsigned int handle; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 05e046aa5e31..58b0274d24cc 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6438,7 +6438,12 @@ static struct ibm_struct brightness_driver_data = { #define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control" #define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME -static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */ +#if SNDRV_CARDS <= 32 +#define DEFAULT_ALSA_IDX ~((1 << (SNDRV_CARDS - 3)) - 1) +#else +#define DEFAULT_ALSA_IDX ~((1 << (32 - 3)) - 1) +#endif +static int alsa_index = DEFAULT_ALSA_IDX; /* last three slots */ static char *alsa_id = "ThinkPadEC"; static bool alsa_enable = SNDRV_DEFAULT_ENABLE1; @@ -9163,7 +9168,6 @@ static int __init thinkpad_acpi_module_init(void) mutex_init(&tpacpi_inputdev_send_mutex); tpacpi_inputdev = input_allocate_device(); if (!tpacpi_inputdev) { - pr_err("unable to allocate input device\n"); thinkpad_acpi_module_exit(); return -ENOMEM; } else { diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c index 67897c8740ba..e597de05e6c2 100644 --- a/drivers/platform/x86/topstar-laptop.c +++ b/drivers/platform/x86/topstar-laptop.c @@ -97,10 +97,8 @@ static int acpi_topstar_init_hkey(struct topstar_hkey *hkey) int error; input = input_allocate_device(); - if (!input) { - pr_err("Unable to allocate input device\n"); + if (!input) return -ENOMEM; - } input->name = "Topstar Laptop extra buttons"; input->phys = "topstar/input0"; diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 0cfadb65f7c6..7fce391818d3 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -975,10 +975,8 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) u32 hci_result; dev->hotkey_dev = input_allocate_device(); - if (!dev->hotkey_dev) { - pr_info("Unable to register input device\n"); + if (!dev->hotkey_dev) return -ENOMEM; - } dev->hotkey_dev->name = "Toshiba input device"; dev->hotkey_dev->phys = "toshiba_acpi/input0"; diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 62e8c221d01e..c2e7b2657aeb 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -672,8 +672,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, struct wmi_block *wblock; wblock = dev_get_drvdata(dev); - if (!wblock) - return -ENOMEM; + if (!wblock) { + strcat(buf, "\n"); + return strlen(buf); + } wmi_gtoa(wblock->gblock.guid, guid_string); diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index 6936e0acedcd..f748cc8cbb03 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -197,6 +197,11 @@ static int pnp_bus_freeze(struct device *dev) return __pnp_bus_suspend(dev, PMSG_FREEZE); } +static int pnp_bus_poweroff(struct device *dev) +{ + return __pnp_bus_suspend(dev, PMSG_HIBERNATE); +} + static int pnp_bus_resume(struct device *dev) { struct pnp_dev *pnp_dev = to_pnp_dev(dev); @@ -234,9 +239,14 @@ static int pnp_bus_resume(struct device *dev) } static const struct dev_pm_ops pnp_bus_dev_pm_ops = { + /* Suspend callbacks */ .suspend = pnp_bus_suspend, - .freeze = 
pnp_bus_freeze, .resume = pnp_bus_resume, + /* Hibernate callbacks */ + .freeze = pnp_bus_freeze, + .thaw = pnp_bus_resume, + .poweroff = pnp_bus_poweroff, + .restore = pnp_bus_resume, }; struct bus_type pnp_bus_type = { diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 747826d99059..14655a0f0431 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -89,7 +89,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) pnp_dbg(&dev->dev, "set resources\n"); - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; @@ -122,7 +122,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) dev_dbg(&dev->dev, "disable resources\n"); - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; @@ -144,7 +144,7 @@ static bool pnpacpi_can_wakeup(struct pnp_dev *dev) struct acpi_device *acpi_dev; acpi_handle handle; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return false; @@ -159,7 +159,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) acpi_handle handle; int error = 0; - handle = DEVICE_ACPI_HANDLE(&dev->dev); + handle = ACPI_HANDLE(&dev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; @@ -194,7 +194,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) static int pnpacpi_resume(struct pnp_dev *dev) { struct acpi_device *acpi_dev; - acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + acpi_handle handle = ACPI_HANDLE(&dev->dev); int error = 0; if (!handle || acpi_bus_get_device(handle, &acpi_dev)) { diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 8d0fe431dbdd..84419af16f77 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -377,9 +377,14 @@ static void create_power_zone_common_attributes( if (power_zone->ops->get_max_energy_range_uj) power_zone->zone_dev_attrs[count++] = &dev_attr_max_energy_range_uj.attr; - if (power_zone->ops->get_energy_uj) + if (power_zone->ops->get_energy_uj) { + if (power_zone->ops->reset_energy_uj) + dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; + else + dev_attr_energy_uj.attr.mode = S_IRUGO; power_zone->zone_dev_attrs[count++] = &dev_attr_energy_uj.attr; + } if (power_zone->ops->get_power_uw) power_zone->zone_dev_attrs[count++] = &dev_attr_power_uw.attr; diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c index 724706a97dc4..fd3154d86901 100644 --- a/drivers/regulator/arizona-micsupp.c +++ b/drivers/regulator/arizona-micsupp.c @@ -174,6 +174,33 @@ static const struct regulator_desc arizona_micsupp = { .owner = THIS_MODULE, }; +static const struct regulator_linear_range arizona_micsupp_ext_ranges[] = { + REGULATOR_LINEAR_RANGE(900000, 0, 0x14, 25000), + REGULATOR_LINEAR_RANGE(1500000, 0x15, 0x27, 100000), +}; + +static const struct regulator_desc arizona_micsupp_ext = { + .name = "MICVDD", + .supply_name = "CPVDD", + .type = REGULATOR_VOLTAGE, + .n_voltages = 40, + .ops = &arizona_micsupp_ops, + + .vsel_reg = ARIZONA_LDO2_CONTROL_1, + 
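A note on the pnp_bus_dev_pm_ops change above: hibernation runs a different callback sequence from suspend-to-RAM (freeze/thaw around image creation, poweroff/restore around the actual power-down), so a dev_pm_ops that only fills in .suspend, .resume and .freeze leaves the hibernate legs empty. Below is a hedged sketch of the same shape for a hypothetical bus, reusing one resume routine for thaw and restore the way the patch does; names prefixed my_bus are invented.

    #include <linux/device.h>
    #include <linux/pm.h>

    /* Common worker; a real bus would quiesce or re-activate its devices here. */
    static int my_bus_suspend_common(struct device *dev, pm_message_t state)
    {
        return 0;
    }

    static int my_bus_suspend(struct device *dev)
    {
        return my_bus_suspend_common(dev, PMSG_SUSPEND);
    }

    static int my_bus_freeze(struct device *dev)
    {
        return my_bus_suspend_common(dev, PMSG_FREEZE);
    }

    static int my_bus_poweroff(struct device *dev)
    {
        return my_bus_suspend_common(dev, PMSG_HIBERNATE);
    }

    static int my_bus_resume(struct device *dev)
    {
        return 0;
    }

    static const struct dev_pm_ops my_bus_pm_ops = {
        /* suspend-to-RAM */
        .suspend  = my_bus_suspend,
        .resume   = my_bus_resume,
        /* hibernation: freeze/thaw around the image, poweroff/restore around power-down */
        .freeze   = my_bus_freeze,
        .thaw     = my_bus_resume,
        .poweroff = my_bus_poweroff,
        .restore  = my_bus_resume,
    };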
.vsel_mask = ARIZONA_LDO2_VSEL_MASK, + .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1, + .enable_mask = ARIZONA_CPMIC_ENA, + .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1, + .bypass_mask = ARIZONA_CPMIC_BYPASS, + + .linear_ranges = arizona_micsupp_ext_ranges, + .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ext_ranges), + + .enable_time = 3000, + + .owner = THIS_MODULE, +}; + static const struct regulator_init_data arizona_micsupp_default = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS | @@ -186,9 +213,22 @@ static const struct regulator_init_data arizona_micsupp_default = { .num_consumer_supplies = 1, }; +static const struct regulator_init_data arizona_micsupp_ext_default = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_VOLTAGE | + REGULATOR_CHANGE_BYPASS, + .min_uV = 900000, + .max_uV = 3300000, + }, + + .num_consumer_supplies = 1, +}; + static int arizona_micsupp_probe(struct platform_device *pdev) { struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); + const struct regulator_desc *desc; struct regulator_config config = { }; struct arizona_micsupp *micsupp; int ret; @@ -207,7 +247,17 @@ static int arizona_micsupp_probe(struct platform_device *pdev) * default init_data for it. This will be overridden with * platform data if provided. */ - micsupp->init_data = arizona_micsupp_default; + switch (arizona->type) { + case WM5110: + desc = &arizona_micsupp_ext; + micsupp->init_data = arizona_micsupp_ext_default; + break; + default: + desc = &arizona_micsupp; + micsupp->init_data = arizona_micsupp_default; + break; + } + micsupp->init_data.consumer_supplies = &micsupp->supply; micsupp->supply.supply = "MICVDD"; micsupp->supply.dev_name = dev_name(arizona->dev); @@ -226,7 +276,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev) ARIZONA_CPMIC_BYPASS, 0); micsupp->regulator = devm_regulator_register(&pdev->dev, - &arizona_micsupp, + desc, &config); if (IS_ERR(micsupp->regulator)) { ret = PTR_ERR(micsupp->regulator); diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index 5917fe3dc983..b9f1d24c6812 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c @@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev, default: return -EINVAL; } + ret <<= ffs(mask) - 1; val = ret & mask; - val <<= ffs(mask) - 1; return as3722_update_bits(as3722, reg, mask, val); } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 6382f0af353b..d85f31385b24 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev) return ""; } +static bool have_full_constraints(void) +{ + return has_full_constraints || of_have_populated_dt(); +} + /** * of_get_regulator - get a regulator device node based on supply name * @dev: Device pointer for the consumer (of regulator) device @@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, * Assume that a regulator is physically present and enabled * even if it isn't hooked up and just provide a dummy. 
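For readers of the arizona-micsupp hunk above: the WM5110 variant's MICVDD is described by two linear selector ranges (25 mV steps from 0.9 V for selectors 0x00 to 0x14, then 100 mV steps from 1.5 V to 3.3 V for 0x15 to 0x27, 40 selectors in total), and the regulator core maps selectors to voltages from that table. Purely as an illustration of what the table encodes, here is a small hypothetical helper doing that mapping by hand; the driver itself does not open-code this.

    #include <linux/errno.h>

    /*
     * Selector-to-microvolt mapping implied by the two REGULATOR_LINEAR_RANGE
     * entries above. Illustrative only; the regulator core normally derives
     * this from the .linear_ranges table.
     */
    static int example_micsupp_sel_to_uV(unsigned int sel)
    {
        if (sel <= 0x14)                 /* 900000 uV base, 25000 uV per step */
            return 900000 + sel * 25000;
        if (sel <= 0x27)                 /* 1500000 uV base, 100000 uV per step */
            return 1500000 + (sel - 0x15) * 100000;
        return -EINVAL;                  /* only 40 selectors are defined */
    }

Selector 0x14 comes out at 1.4 V and 0x15 at 1.5 V, which is why a single linear range cannot describe the part.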
*/ - if (has_full_constraints && allow_dummy) { + if (have_full_constraints() && allow_dummy) { pr_warn("%s supply %s not found, using dummy regulator\n", devname, id); @@ -2184,6 +2189,9 @@ int regulator_list_voltage(struct regulator *regulator, unsigned selector) struct regulator_ops *ops = rdev->desc->ops; int ret; + if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector) + return rdev->desc->fixed_uV; + if (!ops->list_voltage || selector >= rdev->desc->n_voltages) return -EINVAL; @@ -3624,7 +3632,7 @@ int regulator_suspend_finish(void) if (error) ret = error; } else { - if (!has_full_constraints) + if (!have_full_constraints()) goto unlock; if (!ops->disable) goto unlock; @@ -3822,7 +3830,7 @@ static int __init regulator_init_complete(void) if (!enabled) goto unlock; - if (has_full_constraints) { + if (have_full_constraints()) { /* We log since this may kill the system if it * goes wrong. */ rdev_info(rdev, "disabling\n"); diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 04406a918c04..234960dc9607 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -139,6 +139,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np) struct property *prop; const char *regtype; int proplen, gpio, i; + int ret; config = devm_kzalloc(dev, sizeof(struct gpio_regulator_config), @@ -202,7 +203,11 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np) } config->nr_states = i; - of_property_read_string(np, "regulator-type", ®type); + ret = of_property_read_string(np, "regulator-type", ®type); + if (ret < 0) { + dev_err(dev, "Missing 'regulator-type' property\n"); + return ERR_PTR(-EINVAL); + } if (!strncmp("voltage", regtype, 7)) config->type = REGULATOR_VOLTAGE; diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index ba67b2c4e2e7..8b5e4c712a01 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -38,7 +38,7 @@ #define PFUZE100_DEVICEID 0x0 #define PFUZE100_REVID 0x3 -#define PFUZE100_FABID 0x3 +#define PFUZE100_FABID 0x4 #define PFUZE100_SW1ABVOL 0x20 #define PFUZE100_SW1CVOL 0x2e @@ -308,9 +308,15 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip) if (ret) return ret; - if (value & 0x0f) { - dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); - return -ENODEV; + switch (value & 0x0f) { + /* Freescale misprogrammed 1-3% of parts prior to week 8 of 2013 as ID=8 */ + case 0x8: + dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); + case 0x0: + break; + default: + dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); + return -ENODEV; } ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index cbf91e25cf7f..aeb40aad0ae7 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) config.dev = s5m8767->dev; config.init_data = pdata->regulators[i].initdata; config.driver_data = s5m8767; - config.regmap = iodev->regmap; + config.regmap = iodev->regmap_pmic; config.of_node = pdata->regulators[i].reg_node; rdev[i] = devm_regulator_register(&pdev->dev, ®ulators[id], diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 15f166a470a7..007730222116 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -626,7 +626,7 @@ comment "Platform RTC drivers" config RTC_DRV_CMOS tristate "PC-style 
'CMOS'" - depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS || SPARC64 + depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64 default y if X86 help Say "yes" here to get direct support for the real time clock @@ -643,6 +643,14 @@ config RTC_DRV_CMOS This driver can also be built as a module. If so, the module will be called rtc-cmos. +config RTC_DRV_ALPHA + bool "Alpha PC-style CMOS" + depends on ALPHA + default y + help + Direct support for the real-time clock found on every Alpha + system, specifically MC146818 compatibles. If in doubt, say Y. + config RTC_DRV_VRTC tristate "Virtual RTC for Intel MID platforms" depends on X86_INTEL_MID diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 8b2cd8a5a2ff..3281c90691c3 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) at91_alarm_year = tm.tm_year; + tm.tm_mon = alrm->time.tm_mon; + tm.tm_mday = alrm->time.tm_mday; tm.tm_hour = alrm->time.tm_hour; tm.tm_min = alrm->time.tm_min; tm.tm_sec = alrm->time.tm_sec; @@ -428,6 +430,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) return 0; } +static void at91_rtc_shutdown(struct platform_device *pdev) +{ + /* Disable all interrupts */ + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | + AT91_RTC_SECEV | AT91_RTC_TIMEV | + AT91_RTC_CALEV); +} + #ifdef CONFIG_PM_SLEEP /* AT91RM9200 RTC Power management control */ @@ -466,6 +476,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); static struct platform_driver at91_rtc_driver = { .remove = __exit_p(at91_rtc_remove), + .shutdown = at91_rtc_shutdown, .driver = { .name = "at91_rtc", .owner = THIS_MODULE, diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b7fd02bc0a14..ae8119dc2846 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -28,10 +28,20 @@ #include <linux/mfd/samsung/irq.h> #include <linux/mfd/samsung/rtc.h> +/* + * Maximum number of retries for checking changes in UDR field + * of SEC_RTC_UDR_CON register (to limit possible endless loop). + * + * After writing to RTC registers (setting time or alarm) read the UDR field + * in SEC_RTC_UDR_CON register. UDR is auto-cleared when data have + * been transferred. + */ +#define UDR_READ_RETRY_CNT 5 + struct s5m_rtc_info { struct device *dev; struct sec_pmic_dev *s5m87xx; - struct regmap *rtc; + struct regmap *regmap; struct rtc_device *rtc_dev; int irq; int device_type; @@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data) } } +/* + * Read RTC_UDR_CON register and wait till UDR field is cleared. + * This indicates that time/alarm update ended. 
+ */ +static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info) +{ + int ret, retry = UDR_READ_RETRY_CNT; + unsigned int data; + + do { + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); + } while (--retry && (data & RTC_UDR_MASK) && !ret); + + if (!retry) + dev_err(info->dev, "waiting for UDR update, reached max number of retries\n"); + + return ret; +} + static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) { int ret; unsigned int data; - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); if (ret < 0) { dev_err(info->dev, "failed to read update reg(%d)\n", ret); return ret; @@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info) data |= RTC_TIME_EN_MASK; data |= RTC_UDR_MASK; - ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); + ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data); if (ret < 0) { dev_err(info->dev, "failed to write update reg(%d)\n", ret); return ret; } - do { - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); - } while ((data & RTC_UDR_MASK) && !ret); + ret = s5m8767_wait_for_udr_update(info); return ret; } @@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info) int ret; unsigned int data; - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data); if (ret < 0) { dev_err(info->dev, "%s: fail to read update reg(%d)\n", __func__, ret); @@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info) data &= ~RTC_TIME_EN_MASK; data |= RTC_UDR_MASK; - ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data); + ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data); if (ret < 0) { dev_err(info->dev, "%s: fail to write update reg(%d)\n", __func__, ret); return ret; } - do { - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data); - } while ((data & RTC_UDR_MASK) && !ret); + ret = s5m8767_wait_for_udr_update(info); return ret; } @@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm) u8 data[8]; int ret; - ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8); if (ret < 0) return ret; @@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm) 1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); - ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8); if (ret < 0) return ret; @@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) unsigned int val; int ret, i; - ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; switch (info->device_type) { case S5M8763X: s5m8763_data_to_tm(data, &alrm->time); - ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val); + ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val); if (ret < 0) return ret; alrm->enabled = !!val; - ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); + ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val); if (ret < 0) return ret; @@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) } alrm->pending = 0; - ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val); + ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val); if (ret < 0) return ret; break; @@ -301,7 
+326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info) int ret, i; struct rtc_time tm; - ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info) switch (info->device_type) { case S5M8763X: - ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0); + ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0); break; case S5M8767X: for (i = 0; i < 7; i++) data[i] &= ~ALARM_ENABLE_MASK; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) u8 alarm0_conf; struct rtc_time tm; - ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) switch (info->device_type) { case S5M8763X: alarm0_conf = 0x77; - ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf); + ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf); break; case S5M8767X: @@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info) if (data[RTC_YEAR1] & 0x7f) data[RTC_YEAR1] |= ALARM_ENABLE_MASK; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; ret = s5m8767_rtc_set_alarm_reg(info); @@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret < 0) return ret; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8); if (ret < 0) return ret; @@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = { static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) { int ret; - ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, + ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL, WTSR_ENABLE_MASK, enable ? WTSR_ENABLE_MASK : 0); if (ret < 0) @@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable) static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable) { int ret; - ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL, + ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL, SMPL_ENABLE_MASK, enable ? 
SMPL_ENABLE_MASK : 0); if (ret < 0) @@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) int ret; struct rtc_time tm; - ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read); + ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read); if (ret < 0) { dev_err(info->dev, "%s: fail to read control reg(%d)\n", __func__, ret); @@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); info->rtc_24hr_mode = 1; - ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2); + ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2); if (ret < 0) { dev_err(info->dev, "%s: fail to write controlm reg(%d)\n", __func__, ret); @@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) ret = s5m_rtc_set_time(info->dev, &tm); } - ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON, + ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON, RTC_TCON_MASK, tp_read | RTC_TCON_MASK); if (ret < 0) dev_err(info->dev, "%s: fail to update TCON reg(%d)\n", @@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev) info->dev = &pdev->dev; info->s5m87xx = s5m87xx; - info->rtc = s5m87xx->rtc; + info->regmap = s5m87xx->regmap_rtc; info->device_type = s5m87xx->device_type; info->wtsr_smpl = s5m87xx->wtsr_smpl; switch (pdata->device_type) { case S5M8763X: - info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0; + info->irq = regmap_irq_get_virq(s5m87xx->irq_data, + S5M8763_IRQ_ALARM0); break; case S5M8767X: - info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1; + info->irq = regmap_irq_get_virq(s5m87xx->irq_data, + S5M8767_IRQ_RTCA1); break; default: @@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) if (info->wtsr_smpl) { for (i = 0; i < 3; i++) { s5m_rtc_enable_wtsr(info, false); - regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val); + regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val); pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val); if (val & WTSR_ENABLE_MASK) pr_emerg("%s: fail to disable WTSR\n", @@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev) s5m_rtc_enable_smpl(info, false); } +static int s5m_rtc_resume(struct device *dev) +{ + struct s5m_rtc_info *info = dev_get_drvdata(dev); + int ret = 0; + + if (device_may_wakeup(dev)) + ret = disable_irq_wake(info->irq); + + return ret; +} + +static int s5m_rtc_suspend(struct device *dev) +{ + struct s5m_rtc_info *info = dev_get_drvdata(dev); + int ret = 0; + + if (device_may_wakeup(dev)) + ret = enable_irq_wake(info->irq); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); + static const struct platform_device_id s5m_rtc_id[] = { { "s5m-rtc", 0 }, }; @@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = { .driver = { .name = "s5m-rtc", .owner = THIS_MODULE, + .pm = &s5m_rtc_pm_ops, }, .probe = s5m_rtc_probe, .shutdown = s5m_rtc_shutdown, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index cee7e2708a1f..95e45782692f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3224,6 +3224,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, fcx_multitrack = private->features.feature[40] & 0x20; data_size = blk_rq_bytes(req); + if (data_size % blksize) + return ERR_PTR(-EINVAL); /* tpm write request add CBC data on each track boundary */ if (rq_data_dir(req) == WRITE) data_size += (last_trk - first_trk) * 4; diff --git 
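On the rtc-s5m rework above: the old code polled the UDR bit in an unbounded loop after each time or alarm update, and the patch caps that at UDR_READ_RETRY_CNT reads, logging an error if the bit never clears. The following is a compact sketch of that bounded-poll idiom with made-up register names (MY_UPDATE_REG, MY_UPDATE_MASK); unlike the driver, which only logs and carries on, this sketch chooses to return -ETIMEDOUT.

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/regmap.h>

    #define MY_UPDATE_REG     0x04    /* hypothetical "update in progress" register */
    #define MY_UPDATE_MASK    0x01    /* hypothetical busy bit, cleared by hardware */
    #define MY_UPDATE_RETRIES 5

    /* Poll until the hardware clears the busy bit, but never loop forever. */
    static int my_wait_for_update(struct device *dev, struct regmap *map)
    {
        unsigned int val;
        int retry = MY_UPDATE_RETRIES;
        int ret;

        do {
            ret = regmap_read(map, MY_UPDATE_REG, &val);
        } while (--retry && !ret && (val & MY_UPDATE_MASK));

        if (ret)
            return ret;               /* I/O error talking to the device */

        if (!retry && (val & MY_UPDATE_MASK)) {
            dev_err(dev, "update bit still set after %d reads\n",
                    MY_UPDATE_RETRIES);
            return -ETIMEDOUT;        /* the driver itself only logs at this point */
        }
        return 0;
    }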
a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index f64921756ad6..f224d59c4b6b 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -87,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block) { if (block->gdp) { del_gendisk(block->gdp); - block->gdp->queue = NULL; block->gdp->private_data = NULL; put_disk(block->gdp); block->gdp = NULL; diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 548209a9c43c..d0ab5019d885 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -118,22 +118,6 @@ static void scm_request_done(struct scm_request *scmrq) spin_unlock_irqrestore(&list_lock, flags); } -static int scm_open(struct block_device *blkdev, fmode_t mode) -{ - return scm_get_ref(); -} - -static void scm_release(struct gendisk *gendisk, fmode_t mode) -{ - scm_put_ref(); -} - -static const struct block_device_operations scm_blk_devops = { - .owner = THIS_MODULE, - .open = scm_open, - .release = scm_release, -}; - static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) { return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; @@ -256,7 +240,7 @@ static void scm_blk_request(struct request_queue *rq) atomic_inc(&bdev->queued_reqs); blk_start_request(req); - ret = scm_start_aob(scmrq->aob); + ret = eadm_start_aob(scmrq->aob); if (ret) { SCM_LOG(5, "no subchannel"); scm_request_requeue(scmrq); @@ -320,7 +304,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) } restart: - if (!scm_start_aob(scmrq->aob)) + if (!eadm_start_aob(scmrq->aob)) return; requeue: @@ -363,6 +347,10 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) blk_run_queue(bdev->rq); } +static const struct block_device_operations scm_blk_devops = { + .owner = THIS_MODULE, +}; + int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) { struct request_queue *rq; diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index c0d102e3a48b..27f930cd657f 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -187,7 +187,7 @@ bool scm_need_cluster_request(struct scm_request *scmrq) void scm_initiate_cluster_request(struct scm_request *scmrq) { scm_prepare_cluster_request(scmrq); - if (scm_start_aob(scmrq->aob)) + if (eadm_start_aob(scmrq->aob)) scm_request_requeue(scmrq); } diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 17821a026c9c..b69ab17f13fa 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,8 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o + sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ + sclp_early.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index f93cc32eb818..71e974738014 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -564,6 +564,7 @@ static void __exit fs3270_exit(void) { raw3270_unregister_notifier(&fs3270_notifier); + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); } diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 40d1406289ed..6fbe09686d18 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -99,6 +99,7 @@ struct init_sccb { } __attribute__((packed)); extern u64 sclp_facilities; 
+ #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) @@ -179,6 +180,10 @@ void sclp_sdias_exit(void); extern int sclp_console_pages; extern int sclp_console_drop; extern unsigned long sclp_console_full; +extern u8 sclp_fac84; +extern unsigned long long sclp_rzm; +extern unsigned long long sclp_rnmax; +extern __initdata int sclp_early_read_info_sccb_valid; /* useful inlines */ diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 77df9cb00688..eaa21d542c5c 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -28,168 +28,6 @@ #include "sclp.h" -#define SCLP_CMDW_READ_SCP_INFO 0x00020001 -#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 - -struct read_info_sccb { - struct sccb_header header; /* 0-7 */ - u16 rnmax; /* 8-9 */ - u8 rnsize; /* 10 */ - u8 _reserved0[24 - 11]; /* 11-15 */ - u8 loadparm[8]; /* 24-31 */ - u8 _reserved1[48 - 32]; /* 32-47 */ - u64 facilities; /* 48-55 */ - u8 _reserved2[84 - 56]; /* 56-83 */ - u8 fac84; /* 84 */ - u8 fac85; /* 85 */ - u8 _reserved3[91 - 86]; /* 86-90 */ - u8 flags; /* 91 */ - u8 _reserved4[100 - 92]; /* 92-99 */ - u32 rnsize2; /* 100-103 */ - u64 rnmax2; /* 104-111 */ - u8 _reserved5[4096 - 112]; /* 112-4095 */ -} __attribute__((packed, aligned(PAGE_SIZE))); - -static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE); -static struct read_info_sccb __initdata early_read_info_sccb; -static int __initdata early_read_info_sccb_valid; - -u64 sclp_facilities; -static u8 sclp_fac84; -static unsigned long long rzm; -static unsigned long long rnmax; - -static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) -{ - int rc; - - __ctl_set_bit(0, 9); - rc = sclp_service_call(cmd, sccb); - if (rc) - goto out; - __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | - PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); - local_irq_disable(); -out: - /* Contents of the sccb might have changed. */ - barrier(); - __ctl_clear_bit(0, 9); - return rc; -} - -static void __init sclp_read_info_early(void) -{ - int rc; - int i; - struct read_info_sccb *sccb; - sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, - SCLP_CMDW_READ_SCP_INFO}; - - sccb = &early_read_info_sccb; - for (i = 0; i < ARRAY_SIZE(commands); i++) { - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->header.function_code = 0x80; - sccb->header.control_mask[2] = 0x80; - rc = sclp_cmd_sync_early(commands[i], sccb); - } while (rc == -EBUSY); - - if (rc) - break; - if (sccb->header.response_code == 0x10) { - early_read_info_sccb_valid = 1; - break; - } - if (sccb->header.response_code != 0x1f0) - break; - } -} - -static void __init sclp_event_mask_early(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - int rc; - - do { - memset(sccb, 0, sizeof(*sccb)); - sccb->header.length = sizeof(*sccb); - sccb->mask_length = sizeof(sccb_mask_t); - rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); - } while (rc == -EBUSY); -} - -void __init sclp_facilities_detect(void) -{ - struct read_info_sccb *sccb; - - sclp_read_info_early(); - if (!early_read_info_sccb_valid) - return; - - sccb = &early_read_info_sccb; - sclp_facilities = sccb->facilities; - sclp_fac84 = sccb->fac84; - if (sccb->fac85 & 0x02) - S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; - rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; - rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; - rzm <<= 20; - - sclp_event_mask_early(); -} - -bool __init sclp_has_linemode(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) - return 0; - return 1; -} - -bool __init sclp_has_vt220(void) -{ - struct init_sccb *sccb = &early_event_mask_sccb; - - if (sccb->header.response_code != 0x20) - return 0; - if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) - return 1; - return 0; -} - -unsigned long long sclp_get_rnmax(void) -{ - return rnmax; -} - -unsigned long long sclp_get_rzm(void) -{ - return rzm; -} - -/* - * This function will be called after sclp_facilities_detect(), which gets - * called from early.c code. Therefore the sccb should have valid contents. - */ -void __init sclp_get_ipl_info(struct sclp_ipl_info *info) -{ - struct read_info_sccb *sccb; - - if (!early_read_info_sccb_valid) - return; - sccb = &early_read_info_sccb; - info->is_valid = 1; - if (sccb->flags & 0x2) - info->has_dump = 1; - memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); -} - static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; @@ -356,14 +194,14 @@ struct assign_storage_sccb { int arch_get_memory_phys_device(unsigned long start_pfn) { - if (!rzm) + if (!sclp_rzm) return 0; - return PFN_PHYS(start_pfn) >> ilog2(rzm); + return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm); } static unsigned long long rn2addr(u16 rn) { - return (unsigned long long) (rn - 1) * rzm; + return (unsigned long long) (rn - 1) * sclp_rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) @@ -404,7 +242,7 @@ static int sclp_assign_storage(u16 rn) if (rc) return rc; start = rn2addr(rn); - storage_key_init_range(start, start + rzm); + storage_key_init_range(start, start + sclp_rzm); return 0; } @@ -462,7 +300,7 @@ static int sclp_mem_change_state(unsigned long start, unsigned long size, istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; - if (start > istart + rzm - 1) + if (start > istart + sclp_rzm - 1) continue; if (online) rc |= sclp_assign_storage(incr->rn); @@ -526,7 +364,7 @@ static void __init add_memory_merged(u16 rn) if (!first_rn) goto skip_add; start = rn2addr(first_rn); - size = (unsigned long long ) num * rzm; + size = (unsigned long long) num * sclp_rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > VMEM_MAX_PHYS) @@ -574,7 +412,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) } if (!assigned) new_incr->rn = last_rn + 1; - if (new_incr->rn > rnmax) { + if (new_incr->rn > sclp_rnmax) { kfree(new_incr); return; } @@ -617,7 +455,7 @@ static int __init sclp_detect_standby_memory(void) if (OLDMEM_BASE) /* No standby memory in kdump mode */ return 0; - if (!early_read_info_sccb_valid) + if (!sclp_early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; @@ -661,7 +499,7 @@ static int __init sclp_detect_standby_memory(void) } if (rc || list_empty(&sclp_mem_list)) goto out; - for (i = 1; i <= rnmax - assigned; i++) + for (i = 1; i <= sclp_rnmax - assigned; i++) insert_increment(0, 1, 0); rc = register_memory_notifier(&sclp_mem_nb); if (rc) diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c new file mode 100644 index 000000000000..1465e9563101 --- /dev/null +++ b/drivers/s390/char/sclp_early.c @@ 
-0,0 +1,263 @@ +/* + * SCLP early driver + * + * Copyright IBM Corp. 2013 + */ + +#define KMSG_COMPONENT "sclp_early" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <asm/ctl_reg.h> +#include <asm/sclp.h> +#include <asm/ipl.h> +#include "sclp_sdias.h" +#include "sclp.h" + +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 + +struct read_info_sccb { + struct sccb_header header; /* 0-7 */ + u16 rnmax; /* 8-9 */ + u8 rnsize; /* 10 */ + u8 _reserved0[24 - 11]; /* 11-15 */ + u8 loadparm[8]; /* 24-31 */ + u8 _reserved1[48 - 32]; /* 32-47 */ + u64 facilities; /* 48-55 */ + u8 _reserved2[84 - 56]; /* 56-83 */ + u8 fac84; /* 84 */ + u8 fac85; /* 85 */ + u8 _reserved3[91 - 86]; /* 86-90 */ + u8 flags; /* 91 */ + u8 _reserved4[100 - 92]; /* 92-99 */ + u32 rnsize2; /* 100-103 */ + u64 rnmax2; /* 104-111 */ + u8 _reserved5[4096 - 112]; /* 112-4095 */ +} __packed __aligned(PAGE_SIZE); + +static __initdata struct read_info_sccb early_read_info_sccb; +static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); +static unsigned long sclp_hsa_size; + +__initdata int sclp_early_read_info_sccb_valid; +u64 sclp_facilities; +u8 sclp_fac84; +unsigned long long sclp_rzm; +unsigned long long sclp_rnmax; + +static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + __ctl_set_bit(0, 9); + rc = sclp_service_call(cmd, sccb); + if (rc) + goto out; + __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | + PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); + local_irq_disable(); +out: + /* Contents of the sccb might have changed. */ + barrier(); + __ctl_clear_bit(0, 9); + return rc; +} + +static void __init sclp_read_info_early(void) +{ + int rc; + int i; + struct read_info_sccb *sccb; + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, + SCLP_CMDW_READ_SCP_INFO}; + + sccb = &early_read_info_sccb; + for (i = 0; i < ARRAY_SIZE(commands); i++) { + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->header.function_code = 0x80; + sccb->header.control_mask[2] = 0x80; + rc = sclp_cmd_sync_early(commands[i], sccb); + } while (rc == -EBUSY); + + if (rc) + break; + if (sccb->header.response_code == 0x10) { + sclp_early_read_info_sccb_valid = 1; + break; + } + if (sccb->header.response_code != 0x1f0) + break; + } +} + +static void __init sclp_facilities_detect(void) +{ + struct read_info_sccb *sccb; + + sclp_read_info_early(); + if (!sclp_early_read_info_sccb_valid) + return; + + sccb = &early_read_info_sccb; + sclp_facilities = sccb->facilities; + sclp_fac84 = sccb->fac84; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; + sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; + sclp_rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; + sclp_rzm <<= 20; +} + +bool __init sclp_has_linemode(void) +{ + struct init_sccb *sccb = (void *) &sccb_early; + + if (sccb->header.response_code != 0x20) + return 0; + if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) + return 0; + return 1; +} + +bool __init sclp_has_vt220(void) +{ + struct init_sccb *sccb = (void *) &sccb_early; + + if (sccb->header.response_code != 0x20) + return 0; + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + return 1; + return 0; +} + +unsigned long long sclp_get_rnmax(void) +{ + return sclp_rnmax; +} + +unsigned long long sclp_get_rzm(void) +{ + return sclp_rzm; +} + +/* + * This function will be called after sclp_facilities_detect(), which gets + * called from early.c code. Therefore the sccb should have valid contents. + */ +void __init sclp_get_ipl_info(struct sclp_ipl_info *info) +{ + struct read_info_sccb *sccb; + + if (!sclp_early_read_info_sccb_valid) + return; + sccb = &early_read_info_sccb; + info->is_valid = 1; + if (sccb->flags & 0x2) + info->has_dump = 1; + memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); +} + +static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) +{ + int rc; + + do { + rc = sclp_cmd_sync_early(cmd, sccb); + } while (rc == -EBUSY); + + if (rc) + return -EIO; + if (((struct sccb_header *) sccb)->response_code != 0x0020) + return -EIO; + return 0; +} + +static void __init sccb_init_eq_size(struct sdias_sccb *sccb) +{ + memset(sccb, 0, sizeof(*sccb)); + + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.event_qual = SDIAS_EQ_SIZE; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.dbs = 1; +} + +static int __init sclp_set_event_mask(unsigned long receive_mask, + unsigned long send_mask) +{ + struct init_sccb *sccb = (void *) &sccb_early; + + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->mask_length = sizeof(sccb_mask_t); + sccb->receive_mask = receive_mask; + sccb->send_mask = send_mask; + return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); +} + +static long __init sclp_hsa_size_init(void) +{ + struct sdias_sccb *sccb = (void *) &sccb_early; + + sccb_init_eq_size(sccb); + if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) + return -EIO; + if (sccb->evbuf.blk_cnt != 0) + return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; + return 0; +} + +static long __init sclp_hsa_copy_wait(void) +{ + struct sccb_header *sccb = (void *) &sccb_early; + + memset(sccb, 0, PAGE_SIZE); + sccb->length = PAGE_SIZE; + if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) + return -EIO; + return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE; +} + +unsigned long sclp_get_hsa_size(void) +{ + return sclp_hsa_size; +} + +static void __init sclp_hsa_size_detect(void) +{ + long size; + + /* First try synchronous interface (LPAR) */ + if (sclp_set_event_mask(0, 0x40000010)) + return; + size = sclp_hsa_size_init(); + if (size < 0) + return; + if (size != 0) + goto out; + /* Then try asynchronous interface (z/VM) */ + if (sclp_set_event_mask(0x00000010, 0x40000010)) + return; + size = sclp_hsa_size_init(); + if (size < 0) + return; + size = sclp_hsa_copy_wait(); + if (size < 0) + return; +out: + sclp_hsa_size = size; +} + +void __init sclp_early_detect(void) +{ + sclp_facilities_detect(); + sclp_hsa_size_detect(); + sclp_set_event_mask(0, 
0); +} diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index b1032931a1c4..561a0414b352 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -1,7 +1,7 @@ /* - * Sclp "store data in absolut storage" + * SCLP "store data in absolute storage" * - * Copyright IBM Corp. 2003, 2007 + * Copyright IBM Corp. 2003, 2013 * Author(s): Michael Holzheu */ @@ -14,6 +14,7 @@ #include <asm/debug.h> #include <asm/ipl.h> +#include "sclp_sdias.h" #include "sclp.h" #include "sclp_rw.h" @@ -22,46 +23,12 @@ #define SDIAS_RETRIES 300 #define SDIAS_SLEEP_TICKS 50 -#define EQ_STORE_DATA 0x0 -#define EQ_SIZE 0x1 -#define DI_FCP_DUMP 0x0 -#define ASA_SIZE_32 0x0 -#define ASA_SIZE_64 0x1 -#define EVSTATE_ALL_STORED 0x0 -#define EVSTATE_NO_DATA 0x3 -#define EVSTATE_PART_STORED 0x10 - static struct debug_info *sdias_dbf; static struct sclp_register sclp_sdias_register = { .send_mask = EVTYP_SDIAS_MASK, }; -struct sdias_evbuf { - struct evbuf_header hdr; - u8 event_qual; - u8 data_id; - u64 reserved2; - u32 event_id; - u16 reserved3; - u8 asa_size; - u8 event_status; - u32 reserved4; - u32 blk_cnt; - u64 asa; - u32 reserved5; - u32 fbn; - u32 reserved6; - u32 lbn; - u16 reserved7; - u16 dbs; -} __attribute__((packed)); - -struct sdias_sccb { - struct sccb_header hdr; - struct sdias_evbuf evbuf; -} __attribute__((packed)); - static struct sdias_sccb sccb __attribute__((aligned(4096))); static struct sdias_evbuf sdias_evbuf; @@ -148,8 +115,8 @@ int sclp_sdias_blk_count(void) sccb.hdr.length = sizeof(sccb); sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.event_qual = EQ_SIZE; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_SIZE; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; sccb.evbuf.dbs = 1; @@ -208,13 +175,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); sccb.evbuf.hdr.type = EVTYP_SDIAS; sccb.evbuf.hdr.flags = 0; - sccb.evbuf.event_qual = EQ_STORE_DATA; - sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; + sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; #ifdef CONFIG_64BIT - sccb.evbuf.asa_size = ASA_SIZE_64; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; #else - sccb.evbuf.asa_size = ASA_SIZE_32; + sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; #endif sccb.evbuf.event_status = 0; sccb.evbuf.blk_cnt = nr_blks; @@ -240,20 +207,19 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) } switch (sdias_evbuf.event_status) { - case EVSTATE_ALL_STORED: - TRACE("all stored\n"); - break; - case EVSTATE_PART_STORED: - TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); - break; - case EVSTATE_NO_DATA: - TRACE("no data\n"); - /* fall through */ - default: - pr_err("Error from SCLP while copying hsa. " - "Event status = %x\n", - sdias_evbuf.event_status); - rc = -EIO; + case SDIAS_EVSTATE_ALL_STORED: + TRACE("all stored\n"); + break; + case SDIAS_EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); + break; + case SDIAS_EVSTATE_NO_DATA: + TRACE("no data\n"); + /* fall through */ + default: + pr_err("Error from SCLP while copying hsa. 
Event status = %x\n", + sdias_evbuf.event_status); + rc = -EIO; } out: mutex_unlock(&sdias_mutex); diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h new file mode 100644 index 000000000000..f2431c414150 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.h @@ -0,0 +1,46 @@ +/* + * SCLP "store data in absolute storage" + * + * Copyright IBM Corp. 2003, 2013 + */ + +#ifndef SCLP_SDIAS_H +#define SCLP_SDIAS_H + +#include "sclp.h" + +#define SDIAS_EQ_STORE_DATA 0x0 +#define SDIAS_EQ_SIZE 0x1 +#define SDIAS_DI_FCP_DUMP 0x0 +#define SDIAS_ASA_SIZE_32 0x0 +#define SDIAS_ASA_SIZE_64 0x1 +#define SDIAS_EVSTATE_ALL_STORED 0x0 +#define SDIAS_EVSTATE_NO_DATA 0x3 +#define SDIAS_EVSTATE_PART_STORED 0x10 + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __packed; + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __packed; + +#endif /* SCLP_SDIAS_H */ diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index ffb1fcf0bf5b..3d8e4d63f514 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -328,9 +328,9 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, mem_offs = 0; /* Copy from HSA data */ - if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { - size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE - - mem_start)); + if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) { + size = min((count - hdr_count), + (size_t) (sclp_get_hsa_size() - mem_start)); rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); if (rc) goto fail; @@ -490,7 +490,7 @@ static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, static char str[18]; if (hsa_available) - snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE); + snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size()); else snprintf(str, sizeof(str), "0\n"); return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); @@ -584,17 +584,9 @@ static int __init sys_info_init(enum arch_id arch, unsigned long mem_end) static int __init check_sdias(void) { - int rc, act_hsa_size; - - rc = sclp_sdias_blk_count(); - if (rc < 0) { + if (!sclp_get_hsa_size()) { TRACE("Could not determine HSA size\n"); - return rc; - } - act_hsa_size = (rc - 1) * PAGE_SIZE; - if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { - TRACE("HSA size too small: %i\n", act_hsa_size); - return -EINVAL; + return -ENODEV; } return 0; } @@ -662,7 +654,7 @@ static int __init zcore_reipl_init(void) ipl_block = (void *) __get_free_page(GFP_KERNEL); if (!ipl_block) return -ENOMEM; - if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE) + if (ipib_info.ipib < sclp_get_hsa_size()) rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); else rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index aca7bfc113aa..3a2ee4a740b4 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -190,7 +190,7 @@ static struct subchannel *eadm_get_idle_sch(void) return NULL; } -static int eadm_start_aob(struct aob *aob) +int eadm_start_aob(struct aob *aob) { struct eadm_private *private; struct subchannel *sch; @@ -218,6 +218,7 @@ out_unlock: return ret; } +EXPORT_SYMBOL_GPL(eadm_start_aob); static int eadm_subchannel_probe(struct subchannel *sch) { @@ -380,11 +381,6 @@ static 
struct css_driver eadm_subchannel_driver = { .restore = eadm_subchannel_restore, }; -static struct eadm_ops eadm_ops = { - .eadm_start = eadm_start_aob, - .owner = THIS_MODULE, -}; - static int __init eadm_sch_init(void) { int ret; @@ -404,7 +400,6 @@ static int __init eadm_sch_init(void) if (ret) goto cleanup; - register_eadm_ops(&eadm_ops); return ret; cleanup: @@ -415,7 +410,6 @@ cleanup: static void __exit eadm_sch_exit(void) { - unregister_eadm_ops(&eadm_ops); css_driver_unregister(&eadm_subchannel_driver); isc_unregister(EADM_SCH_ISC); debug_unregister(eadm_debug); diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index 46ec25632e8b..15268edc54ae 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c @@ -15,8 +15,6 @@ #include "chsc.h" static struct device *scm_root; -static struct eadm_ops *eadm_ops; -static DEFINE_MUTEX(eadm_ops_mutex); #define to_scm_dev(n) container_of(n, struct scm_device, dev) #define to_scm_drv(d) container_of(d, struct scm_driver, drv) @@ -73,49 +71,6 @@ void scm_driver_unregister(struct scm_driver *scmdrv) } EXPORT_SYMBOL_GPL(scm_driver_unregister); -int scm_get_ref(void) -{ - int ret = 0; - - mutex_lock(&eadm_ops_mutex); - if (!eadm_ops || !try_module_get(eadm_ops->owner)) - ret = -ENOENT; - mutex_unlock(&eadm_ops_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(scm_get_ref); - -void scm_put_ref(void) -{ - mutex_lock(&eadm_ops_mutex); - module_put(eadm_ops->owner); - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(scm_put_ref); - -void register_eadm_ops(struct eadm_ops *ops) -{ - mutex_lock(&eadm_ops_mutex); - eadm_ops = ops; - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(register_eadm_ops); - -void unregister_eadm_ops(struct eadm_ops *ops) -{ - mutex_lock(&eadm_ops_mutex); - eadm_ops = NULL; - mutex_unlock(&eadm_ops_mutex); -} -EXPORT_SYMBOL_GPL(unregister_eadm_ops); - -int scm_start_aob(struct aob *aob) -{ - return eadm_ops->eadm_start(aob); -} -EXPORT_SYMBOL_GPL(scm_start_aob); - void scm_irq_handler(struct aob *aob, int error) { struct aob_rq_header *aobrq = (void *) aob->request.data; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 5e1e12c0cf42..0a7325361d29 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = { .cmd_per_lun = TW_MAX_CMDS_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = twa_host_attrs, - .emulated = 1 + .emulated = 1, + .no_write_same = 1, }; /* This function will probe and initialize a card */ diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index c845bdbeb6c0..4de346017e9f 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = { .cmd_per_lun = TW_MAX_CMDS_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = twl_host_attrs, - .emulated = 1 + .emulated = 1, + .no_write_same = 1, }; /* This function will probe and initialize a card */ diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index b9276d10b25c..752624e6bc00 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = { .cmd_per_lun = TW_MAX_CMDS_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = tw_host_attrs, - .emulated = 1 + .emulated = 1, + .no_write_same = 1, }; /* This function will probe and initialize a card */ diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d85ac1a9d2c0..fbcd48d0bfc3 100644 --- 
a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) { + if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index f0d432c139d0..4921ed19a027 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1081,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = { #endif .use_clustering = ENABLE_CLUSTERING, .emulated = 1, + .no_write_same = 1, }; static void __aac_shutdown(struct aac_dev * aac) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 97fd450aff09..4f6a30b8e5f9 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = { .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = arcmsr_host_attrs, + .no_write_same = 1, }; static struct pci_device_id arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 94d5d0102f7d..42bcb970445a 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); +void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 2f61a5af3658..f5e4e61a0fd7 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -1097,6 +1097,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } +void +bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, + char *symname) +{ + strcpy(port->port_cfg.sym_name.symname, symname); + + if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) + bfa_fcs_lport_ns_util_send_rspn_id( + BFA_FCS_GET_NS_FROM_PORT(port), NULL); +} + /* * fcs_lport_api */ @@ -5140,9 +5151,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced) u8 *psymbl = &symbl[0]; int len; - if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) - return; - /* Avoid sending RSPN in the following states. 
*/ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index e9a681d31223..40be670a1cbc 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) return; spin_lock_irqsave(&bfad->bfad_lock, flags); - if (strlen(sym_name) > 0) { - strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name); - bfa_fcs_lport_ns_util_send_rspn_id( - BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL); - } + if (strlen(sym_name) > 0) + bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index ee4fa40a50b1..ce5ef0190bad 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -4684,6 +4684,7 @@ static struct scsi_host_template gdth_template = { .cmd_per_lun = GDTH_MAXC_P_L, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, }; #ifdef CONFIG_ISA diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f334859024c0..f2c5005f312a 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -395,6 +395,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->use_clustering = sht->use_clustering; shost->ordered_tag = sht->ordered_tag; shost->eh_deadline = shost_eh_deadline * HZ; + shost->no_write_same = sht->no_write_same; if (sht->supported_mode == MODE_UNKNOWN) /* means we didn't set it ... default to INITIATOR */ diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 22f6432eb475..20a5e6ecf945 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -561,6 +561,7 @@ static struct scsi_host_template hpsa_driver_template = { .sdev_attrs = hpsa_sdev_attrs, .shost_attrs = hpsa_shost_attrs, .max_sectors = 8192, + .no_write_same = 1, }; @@ -1288,7 +1289,7 @@ static void complete_scsi_command(struct CommandList *cp) "has check condition: aborted command: " "ASC: 0x%x, ASCQ: 0x%x\n", cp, asc, ascq); - cmd->result = DID_SOFT_ERROR << 16; + cmd->result |= DID_SOFT_ERROR << 16; break; } /* Must be some other type of check condition */ @@ -4925,7 +4926,7 @@ reinit_after_soft_reset: hpsa_hba_inquiry(h); hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ start_controller_lockup_detector(h); - return 1; + return 0; clean4: hpsa_free_sg_chain_blocks(h); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 36ac1c34ce97..573f4128b6b6 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6305,7 +6305,8 @@ static struct scsi_host_template driver_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = ipr_ioa_attrs, .sdev_attrs = ipr_dev_attrs, - .proc_name = IPR_NAME + .proc_name = IPR_NAME, + .no_write_same = 1, }; /** diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 8d5ea8a1e5a6..52a216f21ae5 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = { .sg_tablesize = IPS_MAX_SG, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, }; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 161c98efade9..d2895836f9fa 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) qc->tf.nsect = 0; } - ata_tf_to_fis(&qc->tf, 1, 0, 
(u8*)&task->ata_task.fis); + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); task->uldd_task = qc; if (ata_is_atapi(qc->tf.protocol)) { memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 90c95a3385d1..816db12ef5d5 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4244,6 +4244,7 @@ static struct scsi_host_template megaraid_template = { .eh_device_reset_handler = megaraid_reset, .eh_bus_reset_handler = megaraid_reset, .eh_host_reset_handler = megaraid_reset, + .no_write_same = 1, }; static int diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index d1a4b82836ea..e2237a97cb9d 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = { .eh_host_reset_handler = megaraid_reset_handler, .change_queue_depth = megaraid_change_queue_depth, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, .sdev_attrs = megaraid_sdev_attrs, .shost_attrs = megaraid_shost_attrs, }; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0a743a5d1647..c99812bf2a73 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2148,6 +2148,7 @@ static struct scsi_host_template megasas_template = { .bios_param = megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, .change_queue_depth = megasas_change_queue_depth, + .no_write_same = 1, }; /** diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index f16ece91b94a..0a1296a87d66 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -3403,6 +3403,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) unsigned long flags; u8 deviceType = pPayload->sas_identify.dev_type; port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n", port_id, phy_id)); @@ -3483,6 +3484,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d," " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; port->port_attached = 1; pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index 6d91e2446542..e4867e690c84 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,10 @@ #define LINKRATE_30 (0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for phy state */ + +#define PHY_STATE_LINK_UP_SPC 0x1 + /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ #define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 34f5f5ffef05..73a120d81b4d 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -175,20 +175,16 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; - u32 vec; - pm8001_ha = (struct pm8001_hba_info *)opaque; + struct isr_param *irq_vector; + + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; if (unlikely(!pm8001_ha)) BUG_ON(1); - 
vec = pm8001_ha->int_vector; - PM8001_CHIP_DISP->isr(pm8001_ha, vec); + PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); } #endif -static struct pm8001_hba_info *outq_to_hba(u8 *outq) -{ - return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); -} - /** * pm8001_interrupt_handler_msix - main MSIX interrupt handler. * It obtains the vector number and calls the equivalent bottom @@ -198,18 +194,20 @@ static struct pm8001_hba_info *outq_to_hba(u8 *outq) */ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) { - struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); - u8 outq = *(u8 *)opaque; + struct isr_param *irq_vector; + struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; - pm8001_ha->int_vector = outq; #ifdef PM8001_USE_TASKLET - tasklet_schedule(&pm8001_ha->tasklet); + tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); #endif return ret; } @@ -230,9 +228,8 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; - pm8001_ha->int_vector = 0; #ifdef PM8001_USE_TASKLET - tasklet_schedule(&pm8001_ha->tasklet); + tasklet_schedule(&pm8001_ha->tasklet[0]); #else ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif @@ -457,7 +454,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - + int j; pm8001_ha = sha->lldd_ha; if (!pm8001_ha) @@ -480,12 +477,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->iomb_size = IOMB_SIZE_SPC; #ifdef PM8001_USE_TASKLET - /** - * default tasklet for non msi-x interrupt handler/first msi-x - * interrupt handler - **/ - tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); #endif pm8001_ioremap(pm8001_ha); if (!pm8001_alloc(pm8001_ha, ent)) @@ -733,19 +732,20 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) "pci_enable_msix request ret:%d no of intr %d\n", rc, pm8001_ha->number_of_intr)); - for (i = 0; i < number_of_intr; i++) - pm8001_ha->outq[i] = i; for (i = 0; i < number_of_intr; i++) { snprintf(intr_drvname[i], sizeof(intr_drvname[0]), DRV_NAME"%d", i); + pm8001_ha->irq_vector[i].irq_id = i; + pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; + if (request_irq(pm8001_ha->msix_entries[i].vector, pm8001_interrupt_handler_msix, flag, - intr_drvname[i], &pm8001_ha->outq[i])) { + intr_drvname[i], &(pm8001_ha->irq_vector[i]))) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - &pm8001_ha->outq[j]); + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pm8001_ha->pdev); break; } @@ -907,7 +907,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; - int i; + int i, j; 
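The pm8001 hunks above drop the single tasklet plus the outq[]/int_vector bookkeeping and instead give every MSI-X vector its own tasklet, each bound to an isr_param pair of (driver instance, vector id), so the bottom half no longer has to guess which vector fired. Below is a minimal userspace sketch of that pattern; isr_param, drv_inst and irq_id mirror the patch, while hba, top_half, bottom_half and MAX_VEC are made-up names and a plain callback stands in for the tasklet.

/* Sketch: one bottom-half context per interrupt vector, each carrying
 * a (driver instance, vector id) pair -- hypothetical names, userspace only. */
#include <stdio.h>

#define MAX_VEC 4

struct hba;                             /* forward declaration */

struct isr_param {
	struct hba *drv_inst;           /* which controller instance */
	unsigned int irq_id;            /* which vector fired */
};

struct hba {
	const char *name;
	struct isr_param vec[MAX_VEC];  /* one per vector, set up once */
};

/* Stand-in for the per-vector bottom half (the tasklet body). */
static void bottom_half(void *opaque)
{
	struct isr_param *p = opaque;

	printf("%s: servicing outbound queue for vector %u\n",
	       p->drv_inst->name, p->irq_id);
}

/* Stand-in for the top-half handler: it already knows its vector. */
static void top_half(struct isr_param *p)
{
	bottom_half(p);                 /* in the kernel: tasklet_schedule() */
}

int main(void)
{
	struct hba hba = { .name = "hba0" };
	unsigned int i;

	for (i = 0; i < MAX_VEC; i++) {
		hba.vec[i].drv_inst = &hba;
		hba.vec[i].irq_id = i;
	}

	top_half(&hba.vec[2]);          /* e.g. MSI-X vector 2 fired */
	return 0;
}

In the driver the same per-vector object is what gets handed to request_irq(), free_irq() and tasklet_kill(), which is why the remove/suspend/resume paths in these hunks grow a second loop index over PM8001_MAX_MSIX_VEC.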
pm8001_ha = sha->lldd_ha; sas_unregister_ha(sha); sas_remove_host(pm8001_ha->shost); @@ -921,13 +921,18 @@ static void pm8001_pci_remove(struct pci_dev *pdev) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) free_irq(pm8001_ha->msix_entries[i].vector, - &pm8001_ha->outq[i]); + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET - tasklet_kill(&pm8001_ha->tasklet); + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); #endif pm8001_free(pm8001_ha); kfree(sha->sas_phy); @@ -948,7 +953,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; - int i; + int i, j; u32 device_state; pm8001_ha = sha->lldd_ha; flush_workqueue(pm8001_wq); @@ -964,13 +969,18 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) free_irq(pm8001_ha->msix_entries[i].vector, - &pm8001_ha->outq[i]); + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET - tasklet_kill(&pm8001_ha->tasklet); + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); #endif device_state = pci_choose_state(pdev, state); pm8001_printk("pdev=0x%p, slot=%s, entering " @@ -993,7 +1003,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; - u8 i = 0; + u8 i = 0, j; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -1033,10 +1043,14 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; #ifdef PM8001_USE_TASKLET - /* default tasklet for non msi-x interrupt handler/first msi-x - * interrupt handler */ - tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); #endif PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); if (pm8001_ha->chip_id != chip_8001) { @@ -1169,6 +1183,7 @@ module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); +MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>"); MODULE_DESCRIPTION( "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " "SAS/SATA controller driver"); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index f4eb18e51631..f50ac44b950e 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1098,15 +1098,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) struct 
pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + DECLARE_COMPLETION_ONSTACK(completion_setstate); if (dev_is_sata(dev)) { struct sas_phy *phy = sas_get_local_phy(dev); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); rc = sas_phy_reset(phy, 1); sas_put_local_phy(phy); + pm8001_dev->setds_completion = &completion_setstate; rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); - msleep(2000); + wait_for_completion(&completion_setstate); } else { tmf_task.tmf = TMF_LU_RESET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 6037d477a183..6c5fd5ee22d3 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -466,6 +466,10 @@ struct pm8001_hba_memspace { u64 membase; u32 memsize; }; +struct isr_param { + struct pm8001_hba_info *drv_inst; + u32 irq_id; +}; struct pm8001_hba_info { char name[PM8001_NAME_LENGTH]; struct list_head list; @@ -519,14 +523,13 @@ struct pm8001_hba_info { int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET - struct tasklet_struct tasklet; + struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC]; #endif u32 logging_level; u32 fw_status; u32 smp_exp_mode; - u32 int_vector; const struct firmware *fw_image; - u8 outq[PM8001_MAX_MSIX_VEC]; + struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 8987b1706216..c950dc5c9943 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2894,6 +2894,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) unsigned long flags; u8 deviceType = pPayload->sas_identify.dev_type; port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; PM8001_MSG_DBG(pm8001_ha, pm8001_printk( "portid:%d; phyid:%d; linkrate:%d; " "portstate:%x; devicetype:%x\n", @@ -2978,6 +2979,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) port_id, phy_id, link_rate, portstate)); port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; port->port_attached = 1; pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index c86816bea424..9970a385795d 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -215,6 +215,8 @@ #define SAS_DOPNRJT_RTRY_TMO 128 #define SAS_COPNRJT_RTRY_TMO 128 +/* for phy state */ +#define PHY_STATE_LINK_UP_SPCV 0x2 /* Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e43db7742047..be8ce54f99b2 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1404,11 +1404,22 @@ enum { }; #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) +static struct genl_multicast_group pmcraid_mcgrps[] = { + { .name = "events", /* not really used - see ID discussion below */ }, +}; + static struct genl_family pmcraid_event_family = { - .id = GENL_ID_GENERATE, + /* + * Due to prior multicast group abuse (the code having assumed that + * the family ID can be used as a multicast group ID) we need to + * statically allocate a family (and thus group) ID. 
+ */ + .id = GENL_ID_PMCRAID, .name = "pmcraid", .version = 1, - .maxattr = PMCRAID_AEN_ATTR_MAX + .maxattr = PMCRAID_AEN_ATTR_MAX, + .mcgrps = pmcraid_mcgrps, + .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), }; /** @@ -1511,8 +1522,8 @@ static int pmcraid_notify_aen( return result; } - result = - genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); + result = genlmsg_multicast(&pmcraid_event_family, skb, + 0, 0, GFP_ATOMIC); /* If there are no listeners, genlmsg_multicast may return non-zero * value. @@ -4314,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = { .this_id = -1, .sg_tablesize = PMCRAID_MAX_IOADLS, .max_sectors = PMCRAID_IOA_MAX_SECTORS, + .no_write_same = 1, .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = pmcraid_host_attrs, diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index f85b9e5c1f05..7eb19be35d46 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) @@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) @@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) @@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; } static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) @@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only; + return tpg->tpg_attrib.demo_mode_login_only; } static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( @@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ \ - return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ + return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ } \ \ static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ @@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic * NodeACLs */ - QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; - QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; - QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; - QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1; + tpg->tpg_attrib.generate_node_acls = 1; 
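The tcm_qla2xxx hunks here (and the later iSCSI configfs ones) drop one-line accessor macros such as QLA_TPG_ATTRIB() in favour of direct member access on the embedded attrib struct; behaviour is unchanged, only the indirection goes away. A minimal userspace sketch of the before/after follows; the struct layout is simplified and only the two fields shown are taken from the patch.

/* Sketch of the accessor change: a trivial QLA_TPG_ATTRIB()-style macro is
 * dropped in favour of plain member access.  Simplified, illustrative only. */
#include <stdio.h>

struct tpg_attrib {
	unsigned int generate_node_acls;
	unsigned int demo_mode_write_protect;
};

struct tpg {
	struct tpg_attrib tpg_attrib;   /* embedded, not a pointer */
};

/* Old style: #define QLA_TPG_ATTRIB(t) (&(t)->tpg_attrib), then
 * QLA_TPG_ATTRIB(tpg)->generate_node_acls.  New style: say what you mean. */
int main(void)
{
	struct tpg tpg = { { 0, 0 } };

	tpg.tpg_attrib.generate_node_acls = 1;
	tpg.tpg_attrib.demo_mode_write_protect = 1;

	printf("generate_node_acls=%u write_protect=%u\n",
	       tpg.tpg_attrib.generate_node_acls,
	       tpg.tpg_attrib.demo_mode_write_protect);
	return 0;
}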
+ tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = tcm_qla2xxx_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the fabric for use within TCM */ @@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl */ - TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the npiv_fabric for use within TCM */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 329327528a55..771f7b816443 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg { struct se_portal_group se_tpg; }; -#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib) - struct tcm_qla2xxx_fc_loopid { struct se_node_acl *se_nacl; }; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e6c4bff04339..69725f7c32c1 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2659,6 +2659,12 @@ static void 
sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) { struct scsi_device *sdev = sdkp->device; + if (sdev->host->no_write_same) { + sdev->no_write_same = 1; + + return; + } + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { /* too large values might cause issues with arcmsr */ int vpd_buf_len = 64; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 1a28f5632797..17d740427240 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1697,6 +1697,7 @@ static struct scsi_host_template scsi_driver = { .use_clustering = DISABLE_CLUSTERING, /* Make sure we dont get a sg segment crosses a page boundary */ .dma_boundary = PAGE_SIZE-1, + .no_write_same = 1, }; enum { diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 3ed666fe840a..9025edd7dc45 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -377,7 +377,7 @@ out_master_put: static int bcm2835_spi_remove(struct platform_device *pdev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct spi_master *master = platform_get_drvdata(pdev); struct bcm2835_spi *bs = spi_master_get_devdata(master); free_irq(bs->irq, master); diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 80d56b214eb5..469ecd876358 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -435,7 +435,7 @@ out: static int bcm63xx_spi_remove(struct platform_device *pdev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); + struct spi_master *master = platform_get_drvdata(pdev); struct bcm63xx_spi *bs = spi_master_get_devdata(master); /* reset spi block */ diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index b9f0192758d6..6d207afec8cb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->tx_sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; @@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->rx_sgl, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 9602bbd8d7ea..87676587d783 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c @@ -557,7 +557,7 @@ free_master: static int mpc512x_psc_spi_do_remove(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); clk_disable_unprepare(mps->clk_mclk); diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 73afb56c08cc..3adebfa22e3d 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -565,7 +565,7 @@ static int mxs_spi_remove(struct platform_device *pdev) struct mxs_spi *spi; struct mxs_ssp *ssp; - master = spi_master_get(platform_get_drvdata(pdev)); + master = platform_get_drvdata(pdev); spi = spi_master_get_devdata(master); ssp = &spi->ssp; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index cb0e1f1137ad..7765b1999537 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1073,6 +1073,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) static 
struct acpi_device_id pxa2xx_spi_acpi_match[] = { { "INT33C0", 0 }, { "INT33C1", 0 }, + { "INT3430", 0 }, + { "INT3431", 0 }, { "80860F0E", 0 }, { }, }; @@ -1291,6 +1293,9 @@ static int pxa2xx_spi_resume(struct device *dev) /* Enable the SSP clock */ clk_prepare_enable(ssp->clk); + /* Restore LPSS private register bits */ + lpss_ssp_setup(drv_data); + /* Start the queue running */ status = spi_master_resume(drv_data->master); if (status != 0) { diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 58449ad4ad0d..9e829cee7357 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -885,14 +885,13 @@ static void rspi_release_dma(struct rspi_data *rspi) static int rspi_remove(struct platform_device *pdev) { - struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev)); + struct rspi_data *rspi = platform_get_drvdata(pdev); spi_unregister_master(rspi->master); rspi_release_dma(rspi); free_irq(platform_get_irq(pdev, 0), rspi); clk_put(rspi->clk); iounmap(rspi->addr); - spi_master_put(rspi->master); return 0; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 0b71270fbf67..4396bd448540 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -161,7 +161,7 @@ static int ti_qspi_setup(struct spi_device *spi) qspi->spi_max_frequency, clk_div); ret = pm_runtime_get_sync(qspi->dev); - if (ret) { + if (ret < 0) { dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); return ret; } @@ -459,11 +459,10 @@ static int ti_qspi_probe(struct platform_device *pdev) if (!of_property_read_u32(np, "num-cs", &num_cs)) master->num_chipselect = num_cs; - platform_set_drvdata(pdev, master); - qspi = spi_master_get_devdata(master); qspi->master = master; qspi->dev = &pdev->dev; + platform_set_drvdata(pdev, qspi); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -517,10 +516,26 @@ free_master: static int ti_qspi_remove(struct platform_device *pdev) { - struct ti_qspi *qspi = platform_get_drvdata(pdev); + struct spi_master *master; + struct ti_qspi *qspi; + int ret; + + master = platform_get_drvdata(pdev); + qspi = spi_master_get_devdata(master); + + ret = pm_runtime_get_sync(qspi->dev); + if (ret < 0) { + dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); + return ret; + } ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); + pm_runtime_put(qspi->dev); + pm_runtime_disable(&pdev->dev); + + spi_unregister_master(master); + return 0; } diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c index 637cce2b8bdd..18c9bb2b5f39 100644 --- a/drivers/spi/spi-txx9.c +++ b/drivers/spi/spi-txx9.c @@ -425,7 +425,7 @@ exit: static int txx9spi_remove(struct platform_device *dev) { - struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); + struct spi_master *master = platform_get_drvdata(dev); struct txx9spi *c = spi_master_get_devdata(master); destroy_workqueue(c->workqueue); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8d85ddc46011..349ebba4b199 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -357,6 +357,19 @@ struct spi_device *spi_alloc_device(struct spi_master *master) } EXPORT_SYMBOL_GPL(spi_alloc_device); +static void spi_dev_set_name(struct spi_device *spi) +{ + struct acpi_device *adev = ACPI_COMPANION(&spi->dev); + + if (adev) { + dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); + return; + } + + dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), + spi->chip_select); +} + /** * spi_add_device - Add spi_device allocated with spi_alloc_device * @spi: spi_device to 
register @@ -383,9 +396,7 @@ int spi_add_device(struct spi_device *spi) } /* Set the bus ID string */ - dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), - spi->chip_select); - + spi_dev_set_name(spi); /* We need to make sure there's no other device with this * chipselect **BEFORE** we call setup(), else we'll trash @@ -1144,7 +1155,7 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, return AE_NO_MEMORY; } - ACPI_HANDLE_SET(&spi->dev, handle); + ACPI_COMPANION_SET(&spi->dev, adev); spi->irq = -1; INIT_LIST_HEAD(&resource_list); @@ -1404,7 +1415,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master) return -ENOMEM; ret = spi_register_master(master); - if (ret != 0) { + if (!ret) { *ptr = master; devres_add(dev, ptr); } else { diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c index 7a9bf3b57810..9a5ebd6cc512 100644 --- a/drivers/staging/btmtk_usb/btmtk_usb.c +++ b/drivers/staging/btmtk_usb/btmtk_usb.c @@ -1284,9 +1284,8 @@ done: kfree_skb(skb); } -static int btmtk_usb_send_frame(struct sk_buff *skb) +static int btmtk_usb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_dev *hdev = (struct hci_dev *)skb->dev; struct btmtk_usb_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c index d041b714db29..2baaf1db6fbf 100644 --- a/drivers/staging/comedi/drivers/pcl730.c +++ b/drivers/staging/comedi/drivers/pcl730.c @@ -173,11 +173,11 @@ static int pcl730_do_insn_bits(struct comedi_device *dev, if (mask) { if (mask & 0x00ff) outb(s->state & 0xff, dev->iobase + reg); - if ((mask & 0xff00) & (s->n_chan > 8)) + if ((mask & 0xff00) && (s->n_chan > 8)) outb((s->state >> 8) & 0xff, dev->iobase + reg + 1); - if ((mask & 0xff0000) & (s->n_chan > 16)) + if ((mask & 0xff0000) && (s->n_chan > 16)) outb((s->state >> 16) & 0xff, dev->iobase + reg + 2); - if ((mask & 0xff000000) & (s->n_chan > 24)) + if ((mask & 0xff000000) && (s->n_chan > 24)) outb((s->state >> 24) & 0xff, dev->iobase + reg + 3); } diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 6815cfe2664e..b486099b543d 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -494,7 +494,7 @@ static void s626_send_dac(struct comedi_device *dev, uint32_t val) * Private helper function: Write setpoint to an application DAC channel. 
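The pcl730 hunk above replaces bitwise & with logical && when combining the mask test with the channel-count test. With &, the comparison (s->n_chan > 8) evaluates to 0 or 1, which never intersects the high bits of mask & 0xff00, so the sub-condition could never be true and the upper data ports were never written. A small standalone check of that logic, with hypothetical values:

/* Demonstrates why (mask & 0xff00) & (n_chan > 8) never fires:
 * the comparison yields 0 or 1, and mask & 0xff00 has no bit 0 set. */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x0300;     /* e.g. channels 8 and 9 requested */
	int n_chan = 24;                /* board has more than 8 channels */

	int buggy   = (mask & 0xff00) & (n_chan > 8);   /* bitwise AND */
	int correct = (mask & 0xff00) && (n_chan > 8);  /* logical AND */

	printf("buggy=%d correct=%d\n", buggy, correct); /* prints buggy=0 correct=1 */
	return 0;
}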
*/ static void s626_set_dac(struct comedi_device *dev, uint16_t chan, - unsigned short dacdata) + int16_t dacdata) { struct s626_private *devpriv = dev->private; uint16_t signmask; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 933b01a0f03d..0adf3cffddb0 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -465,7 +465,7 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev, unsigned char *rx_buf = devpriv->usb_rx_buf; unsigned char *tx_buf = devpriv->usb_tx_buf; int reg, cmd; - int ret; + int ret = 0; if (devpriv->model == VMK8061_MODEL) { reg = VMK8061_DO_REG; diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c index 68ded17c0f5c..12f333fa59b5 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c @@ -578,7 +578,7 @@ static int request_code_segment(struct ft1000_usb *ft1000dev, u16 **s_file, u8 **c_file, const u8 *endpoint, bool boot_case) { long word_length; - int status; + int status = 0; /*DEBUG("FT1000:REQUEST_CODE_SEGMENT\n");i*/ word_length = get_request_value(ft1000dev); @@ -1074,4 +1074,3 @@ int scram_dnldr(struct ft1000_usb *ft1000dev, void *pFileStart, return status; } - diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig index a3ea69e9d800..34634da1f9f7 100644 --- a/drivers/staging/iio/magnetometer/Kconfig +++ b/drivers/staging/iio/magnetometer/Kconfig @@ -6,6 +6,8 @@ menu "Magnetometer sensors" config SENSORS_HMC5843 tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer" depends on I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y here to add support for the Honeywell HMC5843, HMC5883 and HMC5883L 3-Axis Magnetometer (digital compass). 
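The vmk80xx and ft1000 hunks above initialize ret/status to 0; the usual reason for such a change is that the variable is only assigned on some paths (inside a branch or loop body that may not execute), so the old code could return an uninitialized value. A minimal illustration of that hazard, with an entirely made-up function:

/* Returning an accumulator that is only set inside a loop: if the loop body
 * never runs, an uninitialized local would be returned.  Hypothetical example. */
#include <stdio.h>

static int send_chunks(int nchunks)
{
	int status = 0;                 /* the fix: start from a defined value */
	int i;

	for (i = 0; i < nchunks; i++)
		status = i;             /* stands in for a per-chunk transfer result */

	return status;                  /* well-defined even when nchunks == 0 */
}

int main(void)
{
	printf("%d\n", send_chunks(0)); /* prints 0, not garbage */
	return 0;
}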
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile index 2c3a9e178fb5..8742432d7b01 100644 --- a/drivers/staging/imx-drm/Makefile +++ b/drivers/staging/imx-drm/Makefile @@ -8,4 +8,6 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/ -obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o ipuv3-plane.o + +imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o +obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 51aa9772f959..6bd015ac9d68 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -72,6 +72,7 @@ int imx_drm_crtc_id(struct imx_drm_crtc *crtc) { return crtc->pipe; } +EXPORT_SYMBOL_GPL(imx_drm_crtc_id); static void imx_drm_driver_lastclose(struct drm_device *drm) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c index 5dec771d70ee..4d340f4a2198 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c @@ -409,8 +409,8 @@ int ptlrpc_stop_pinger(void) struct l_wait_info lwi = { 0 }; int rc = 0; - if (!thread_is_init(&pinger_thread) && - !thread_is_stopped(&pinger_thread)) + if (thread_is_init(&pinger_thread) || + thread_is_stopped(&pinger_thread)) return -EALREADY; ptlrpc_pinger_remove_timeouts(); diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c index 58684da45e6c..b658c2316df3 100644 --- a/drivers/staging/media/go7007/go7007-usb.c +++ b/drivers/staging/media/go7007/go7007-usb.c @@ -15,6 +15,8 @@ * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -661,7 +663,7 @@ static int go7007_usb_interface_reset(struct go7007 *go) if (usb->board->flags & GO7007_USB_EZUSB) { /* Reset buffer in EZ-USB */ - dev_dbg(go->dev, "resetting EZ-USB buffers\n"); + pr_debug("resetting EZ-USB buffers\n"); if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 || go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0) return -1; @@ -689,7 +691,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go, u16 status_reg = 0; int timeout = 500; - dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data); + pr_debug("WriteInterrupt: %04x %04x\n", addr, data); for (i = 0; i < 100; ++i) { r = usb_control_msg(usb->usbdev, @@ -734,7 +736,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go, int r; int timeout = 500; - dev_dbg(go->dev, "WriteInterrupt: %04x %04x\n", addr, data); + pr_debug("WriteInterrupt: %04x %04x\n", addr, data); go->usb_buf[0] = data & 0xff; go->usb_buf[1] = data >> 8; @@ -771,7 +773,7 @@ static void go7007_usb_readinterrupt_complete(struct urb *urb) go->interrupt_available = 1; go->interrupt_data = __le16_to_cpu(regs[0]); go->interrupt_value = __le16_to_cpu(regs[1]); - dev_dbg(go->dev, "ReadInterrupt: %04x %04x\n", + pr_debug("ReadInterrupt: %04x %04x\n", go->interrupt_value, go->interrupt_data); } @@ -891,7 +893,7 @@ static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len) int transferred, pipe; int timeout = 500; - dev_dbg(go->dev, "DownloadBuffer sending %d bytes\n", len); + pr_debug("DownloadBuffer sending %d bytes\n", len); if (usb->board->flags & GO7007_USB_EZUSB) pipe = usb_sndbulkpipe(usb->usbdev, 2); @@ -977,7 +979,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter, !(msgs[i].flags & I2C_M_RD) && (msgs[i + 1].flags & I2C_M_RD)) { #ifdef GO7007_I2C_DEBUG - dev_dbg(go->dev, "i2c write/read %d/%d bytes on %02x\n", + pr_debug("i2c write/read %d/%d bytes on %02x\n", msgs[i].len, msgs[i + 1].len, msgs[i].addr); #endif buf[0] = 0x01; @@ -988,7 +990,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter, buf[buf_len++] = msgs[++i].len; } else if (msgs[i].flags & I2C_M_RD) { #ifdef GO7007_I2C_DEBUG - dev_dbg(go->dev, "i2c read %d bytes on %02x\n", + pr_debug("i2c read %d bytes on %02x\n", msgs[i].len, msgs[i].addr); #endif buf[0] = 0x01; @@ -998,7 +1000,7 @@ static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter, buf_len = 4; } else { #ifdef GO7007_I2C_DEBUG - dev_dbg(go->dev, "i2c write %d bytes on %02x\n", + pr_debug("i2c write %d bytes on %02x\n", msgs[i].len, msgs[i].addr); #endif buf[0] = 0x00; @@ -1057,7 +1059,7 @@ static int go7007_usb_probe(struct usb_interface *intf, char *name; int video_pipe, i, v_urb_len; - dev_dbg(go->dev, "probing new GO7007 USB board\n"); + pr_debug("probing new GO7007 USB board\n"); switch (id->driver_info) { case GO7007_BOARDID_MATRIX_II: @@ -1097,13 +1099,13 @@ static int go7007_usb_probe(struct usb_interface *intf, board = &board_px_tv402u; break; case GO7007_BOARDID_LIFEVIEW_LR192: - dev_err(go->dev, "The Lifeview TV Walker Ultra is not supported. Sorry!\n"); + dev_err(&intf->dev, "The Lifeview TV Walker Ultra is not supported. 
Sorry!\n"); return -ENODEV; name = "Lifeview TV Walker Ultra"; board = &board_lifeview_lr192; break; case GO7007_BOARDID_SENSORAY_2250: - dev_info(go->dev, "Sensoray 2250 found\n"); + dev_info(&intf->dev, "Sensoray 2250 found\n"); name = "Sensoray 2250/2251"; board = &board_sensoray_2250; break; @@ -1112,7 +1114,7 @@ static int go7007_usb_probe(struct usb_interface *intf, board = &board_ads_usbav_709; break; default: - dev_err(go->dev, "unknown board ID %d!\n", + dev_err(&intf->dev, "unknown board ID %d!\n", (unsigned int)id->driver_info); return -ENODEV; } @@ -1247,7 +1249,7 @@ static int go7007_usb_probe(struct usb_interface *intf, sizeof(go->name)); break; default: - dev_dbg(go->dev, "unable to detect tuner type!\n"); + pr_debug("unable to detect tuner type!\n"); break; } /* Configure tuner mode selection inputs connected diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index 3066ee2e753b..49ea76b3435d 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c @@ -681,7 +681,8 @@ static irqreturn_t nvec_interrupt(int irq, void *dev) dev_err(nvec->dev, "RX buffer overflow on %p: " "Trying to write byte %u of %u\n", - nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE); + nvec->rx, nvec->rx ? nvec->rx->pos : 0, + NVEC_MSG_SIZE); break; default: nvec->state = 0; diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index 2c678f409573..2f548ebada59 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -1115,6 +1115,9 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) return _FAIL; } + /* fix bug of flush_cam_entry at STOP AP mode */ + psta->state |= WIFI_AP_STATE; + rtw_indicate_connect(padapter); pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */ return ret; } diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig index 165b918b8171..1b6d581c438b 100644 --- a/drivers/staging/tidspbridge/Kconfig +++ b/drivers/staging/tidspbridge/Kconfig @@ -4,7 +4,7 @@ menuconfig TIDSPBRIDGE tristate "DSP Bridge driver" - depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM + depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM && BROKEN select MAILBOX select OMAP2PLUS_MBOX help diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 1aa4a3fd0f1b..56e355b3e7fa 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c @@ -258,7 +258,8 @@ err: /* This function maps kernel space memory to user space memory. 
*/ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) { - u32 status; + struct omap_dsp_platform_data *pdata = + omap_dspbridge_dev->dev.platform_data; /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -268,13 +269,9 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_start, vma->vm_end, vma->vm_page_prot, vma->vm_flags); - status = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); - if (status != 0) - status = -EAGAIN; - - return status; + return vm_iomap_memory(vma, + pdata->phys_mempool_base, + pdata->phys_mempool_size); } static const struct file_operations bridge_fops = { diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c index aab0012bba92..ab8b2ba6eedd 100644 --- a/drivers/staging/vt6655/hostap.c +++ b/drivers/staging/vt6655/hostap.c @@ -143,7 +143,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - free_netdev(pDevice->apdev); + if (pDevice->apdev) + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c index 1e8b8412e67e..4aa5ef54b683 100644 --- a/drivers/staging/vt6656/baseband.c +++ b/drivers/staging/vt6656/baseband.c @@ -939,6 +939,7 @@ int BBbVT3184Init(struct vnt_private *pDevice) u8 * pbyAgc; u16 wLengthAgc; u8 abyArray[256]; + u8 data; ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, @@ -1104,6 +1105,16 @@ else { ControlvWriteByte(pDevice,MESSAGE_REQUEST_BBREG,0x0D,0x01); RFbRFTableDownload(pDevice); + + /* Fix for TX USB resets from vendors driver */ + CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, USB_REG4, + MESSAGE_REQUEST_MEM, sizeof(data), &data); + + data |= 0x2; + + CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, USB_REG4, + MESSAGE_REQUEST_MEM, sizeof(data), &data); + return true;//ntStatus; } diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c index ae1676d190c5..67ba48b9a8d9 100644 --- a/drivers/staging/vt6656/hostap.c +++ b/drivers/staging/vt6656/hostap.c @@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - free_netdev(pDevice->apdev); + if (pDevice->apdev) + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h index 5e073062017a..5cf5e732a36f 100644 --- a/drivers/staging/vt6656/rndis.h +++ b/drivers/staging/vt6656/rndis.h @@ -66,6 +66,8 @@ #define VIAUSB20_PACKET_HEADER 0x04 +#define USB_REG4 0x604 + typedef struct _CMD_MESSAGE { u8 byData[256]; diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 79ce363b2ea9..3277d9838f4e 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -652,21 +652,30 @@ static ssize_t reset_store(struct device *dev, return -ENOMEM; /* Do not reset an active device! 
*/ - if (bdev->bd_holders) - return -EBUSY; + if (bdev->bd_holders) { + ret = -EBUSY; + goto out; + } ret = kstrtou16(buf, 10, &do_reset); if (ret) - return ret; + goto out; - if (!do_reset) - return -EINVAL; + if (!do_reset) { + ret = -EINVAL; + goto out; + } /* Make sure all pending I/O is finished */ fsync_bdev(bdev); + bdput(bdev); zram_reset_device(zram, true); return len; + +out: + bdput(bdev); + return ret; } static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c index 1a67537dbc56..3b950e5a918f 100644 --- a/drivers/staging/zsmalloc/zsmalloc-main.c +++ b/drivers/staging/zsmalloc/zsmalloc-main.c @@ -430,7 +430,12 @@ static struct page *get_next_page(struct page *page) return next; } -/* Encode <page, obj_idx> as a single handle value */ +/* + * Encode <page, obj_idx> as a single handle value. + * On hardware platforms with physical memory starting at 0x0 the pfn + * could be 0 so we ensure that the handle will never be 0 by adjusting the + * encoded obj_idx value before encoding. + */ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) { unsigned long handle; @@ -441,17 +446,21 @@ static void *obj_location_to_handle(struct page *page, unsigned long obj_idx) } handle = page_to_pfn(page) << OBJ_INDEX_BITS; - handle |= (obj_idx & OBJ_INDEX_MASK); + handle |= ((obj_idx + 1) & OBJ_INDEX_MASK); return (void *)handle; } -/* Decode <page, obj_idx> pair from the given object handle */ +/* + * Decode <page, obj_idx> pair from the given object handle. We adjust the + * decoded obj_idx back to its original value since it was adjusted in + * obj_location_to_handle(). + */ static void obj_handle_to_location(unsigned long handle, struct page **page, unsigned long *obj_idx) { *page = pfn_to_page(handle >> OBJ_INDEX_BITS); - *obj_idx = handle & OBJ_INDEX_MASK; + *obj_idx = (handle & OBJ_INDEX_MASK) - 1; } static unsigned long obj_idx_to_offset(struct page *page, diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 38e44b9abf0f..d70e9119e906 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -805,14 +805,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int iscsi_task_attr; int sam_task_attr; - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->cmd_pdus++; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->num_cmds++; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->cmd_pdus); hdr = (struct iscsi_scsi_req *) buf; payload_length = ntoh24(hdr->dlength); @@ -1254,20 +1247,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, int rc; if (!payload_length) { - pr_err("DataOUT payload is ZERO, protocol error.\n"); - return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, - buf); + pr_warn("DataOUT payload is ZERO, ignoring.\n"); + return 0; } /* iSCSI write */ - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->rx_data_octets += payload_length; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->write_bytes += payload_length; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + 
atomic_long_add(payload_length, &conn->sess->rx_data_octets); if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { pr_err("DataSegmentLength: %u is greater than" @@ -1486,7 +1471,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload); static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) { - struct iscsi_cmd *cmd; + struct iscsi_cmd *cmd = NULL; struct iscsi_data *hdr = (struct iscsi_data *)buf; int rc; bool data_crc_failed = false; @@ -1954,6 +1939,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, (unsigned char *)hdr); } + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) || + (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) { + pr_err("Multi sequence text commands currently not supported\n"); + return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED, + (unsigned char *)hdr); + } + pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, hdr->exp_statsn, payload_length); @@ -2630,14 +2622,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) return -1; } - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->tx_data_octets += datain.length; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->read_bytes += datain.length; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_add(datain.length, &conn->sess->tx_data_octets); /* * Special case for successfully execution w/ both DATAIN * and Sense Data. @@ -3162,9 +3147,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, if (inc_stat_sn) cmd->stat_sn = conn->stat_sn++; - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->rsp_pdus++; - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->rsp_pdus); memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; @@ -3374,6 +3357,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_tiqn *tiqn; struct iscsi_tpg_np *tpg_np; int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; + int target_name_printed; unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; @@ -3411,19 +3395,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) continue; } - len = sprintf(buf, "TargetName=%s", tiqn->tiqn); - len += 1; - - if ((len + payload_len) > buffer_len) { - end_of_buf = 1; - goto eob; - } - memcpy(payload + payload_len, buf, len); - payload_len += len; + target_name_printed = 0; spin_lock(&tiqn->tiqn_tpg_lock); list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + /* If demo_mode_discovery=0 and generate_node_acls=0 + * (demo mode dislabed) do not return + * TargetName+TargetAddress unless a NodeACL exists. 
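The iscsi_target.c hunks in this area replace statistics counters that were bumped under session_stats_lock (with a nested per-ACL stats_lock) by lock-free atomic_long_inc()/atomic_long_add() updates, which is sufficient for simple counters. A sketch of the same pattern in portable C11 atomics, with made-up names:

/* Lock-free statistics counters with C11 atomics -- sketch only. */
#include <stdatomic.h>
#include <stdio.h>

struct session_stats {
	atomic_long cmd_pdus;
	atomic_long rx_data_octets;
};

static void account_cmd(struct session_stats *s, long payload_len)
{
	/* Was: lock; s->cmd_pdus++; s->rx_data_octets += len; unlock; */
	atomic_fetch_add(&s->cmd_pdus, 1);
	atomic_fetch_add(&s->rx_data_octets, payload_len);
}

int main(void)
{
	struct session_stats s;

	atomic_init(&s.cmd_pdus, 0);
	atomic_init(&s.rx_data_octets, 0);

	account_cmd(&s, 4096);
	printf("pdus=%ld octets=%ld\n",
	       atomic_load(&s.cmd_pdus), atomic_load(&s.rx_data_octets));
	return 0;
}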
+ */ + + if ((tpg->tpg_attrib.generate_node_acls == 0) && + (tpg->tpg_attrib.demo_mode_discovery == 0) && + (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg, + cmd->conn->sess->sess_ops->InitiatorName))) { + continue; + } + spin_lock(&tpg->tpg_state_lock); if ((tpg->tpg_state == TPG_STATE_FREE) || (tpg->tpg_state == TPG_STATE_INACTIVE)) { @@ -3438,6 +3426,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_np *np = tpg_np->tpg_np; bool inaddr_any = iscsit_check_inaddr_any(np); + if (!target_name_printed) { + len = sprintf(buf, "TargetName=%s", + tiqn->tiqn); + len += 1; + + if ((len + payload_len) > buffer_len) { + spin_unlock(&tpg->tpg_np_lock); + spin_unlock(&tiqn->tiqn_tpg_lock); + end_of_buf = 1; + goto eob; + } + memcpy(payload + payload_len, buf, len); + payload_len += len; + target_name_printed = 1; + } + len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", (inaddr_any == false) ? @@ -4092,9 +4096,7 @@ restart: * hit default in the switch below. */ memset(buffer, 0xff, ISCSI_HDR_LEN); - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->conn_digest_errors++; - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->conn_digest_errors); } else { pr_debug("Got HeaderDigest CRC32C" " 0x%08x\n", checksum); @@ -4381,7 +4383,7 @@ int iscsit_close_connection( int iscsit_close_session(struct iscsi_session *sess) { - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (atomic_read(&sess->nconn)) { diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 7505fddca15f..de77d9aa22c6 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open( /* * Set Identifier. 
*/ - chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++; + chap->id = conn->tpg->tpg_chap_id++; *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id); *aic_len += 1; pr_debug("[server] Sending CHAP_I=%d\n", chap->id); @@ -146,6 +146,7 @@ static int chap_server_compute_md5( unsigned char client_digest[MD5_SIGNATURE_SIZE]; unsigned char server_digest[MD5_SIGNATURE_SIZE]; unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; + size_t compare_len; struct iscsi_chap *chap = conn->auth_protocol; struct crypto_hash *tfm; struct hash_desc desc; @@ -184,7 +185,9 @@ static int chap_server_compute_md5( goto out; } - if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) { + /* Include the terminating NULL in the compare */ + compare_len = strlen(auth->userid) + 1; + if (strncmp(chap_n, auth->userid, compare_len) != 0) { pr_err("CHAP_N values do not match!\n"); goto out; } diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index fd145259361d..e3318edb233d 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ se_node_acl); \ \ - return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \ + return sprintf(page, "%u\n", nacl->node_attrib.name); \ } \ \ static ssize_t iscsi_nacl_attrib_store_##name( \ @@ -897,7 +897,7 @@ static struct se_node_acl *lio_target_make_nodeacl( if (!se_nacl_new) return ERR_PTR(-ENOMEM); - cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; + cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth; /* * se_nacl_new may be released by core_tpg_add_initiator_node_acl() * when converting a NdoeACL from demo mode -> explict @@ -920,9 +920,9 @@ static struct se_node_acl *lio_target_make_nodeacl( return ERR_PTR(-ENOMEM); } - stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group; + stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group; stats_cg->default_groups[1] = NULL; - config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group, + config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group, "iscsi_sess_stats", &iscsi_stat_sess_cit); return se_nacl; @@ -967,7 +967,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \ if (iscsit_get_tpg(tpg) < 0) \ return -EINVAL; \ \ - rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \ + rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \ iscsit_put_tpg(tpg); \ return rb; \ } \ @@ -1041,6 +1041,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); */ DEF_TPG_ATTRIB(prod_mode_write_protect); TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_demo_mode_discovery, + */ +DEF_TPG_ATTRIB(demo_mode_discovery); +TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_default_erl + */ +DEF_TPG_ATTRIB(default_erl); +TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_authentication.attr, @@ -1051,6 +1061,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_cache_dynamic_acls.attr, &iscsi_tpg_attrib_demo_mode_write_protect.attr, &iscsi_tpg_attrib_prod_mode_write_protect.attr, + &iscsi_tpg_attrib_demo_mode_discovery.attr, + &iscsi_tpg_attrib_default_erl.attr, NULL, }; @@ -1514,21 +1526,21 @@ static struct se_wwn 
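The CHAP change above switches from memcmp() over strlen(userid) bytes to strncmp() over strlen(userid)+1, so a CHAP_N value that merely shares a prefix with the configured username no longer matches. A minimal compilable demonstration (function name is illustrative):

#include <assert.h>
#include <string.h>

/* Compare including the terminating NUL, as in the diff above, so a
 * longer peer name that starts with the configured name is rejected. */
static int chap_name_matches(const char *chap_n, const char *userid)
{
    size_t compare_len = strlen(userid) + 1;

    return strncmp(chap_n, userid, compare_len) == 0;
}

int main(void)
{
    assert(chap_name_matches("initiator1", "initiator1"));
    /* The old memcmp-over-strlen() check would have accepted this. */
    assert(!chap_name_matches("initiator1-evil", "initiator1"));
    return 0;
}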
*lio_target_call_coreaddtiqn( return ERR_PTR(-ENOMEM); } - stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group; - stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group; - stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group; - stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group; - stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group; + stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group; + stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group; + stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group; + stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group; + stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group; stats_cg->default_groups[5] = NULL; - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, "iscsi_instance", &iscsi_stat_instance_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group, "iscsi_sess_err", &iscsi_stat_sess_err_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group, "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group, "iscsi_login_stats", &iscsi_stat_login_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, "iscsi_logout_stats", &iscsi_stat_logout_cit); pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); @@ -1784,6 +1796,11 @@ static int lio_queue_status(struct se_cmd *se_cmd) struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); cmd->i_state = ISTATE_SEND_STATUS; + + if (cmd->se_cmd.scsi_status || cmd->sense_reason) { + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + return 0; + } cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); return 0; @@ -1815,21 +1832,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; + return tpg->tpg_attrib.default_cmdsn_depth; } static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int lio_tpg_check_demo_mode_write_protect( @@ -1837,7 +1854,7 @@ static int lio_tpg_check_demo_mode_write_protect( { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int lio_tpg_check_prod_mode_write_protect( @@ -1845,7 +1862,7 @@ static int lio_tpg_check_prod_mode_write_protect( { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return 
ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; } static void lio_tpg_release_fabric_acl( @@ -1908,9 +1925,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl) { struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl, se_node_acl); + struct se_portal_group *se_tpg = se_acl->se_tpg; + struct iscsi_portal_group *tpg = container_of(se_tpg, + struct iscsi_portal_group, tpg_se_tpg); - ISCSI_NODE_ATTRIB(acl)->nacl = acl; - iscsit_set_default_node_attribues(acl); + acl->node_attrib.nacl = acl; + iscsit_set_default_node_attribues(acl, tpg); } static int lio_check_stop_free(struct se_cmd *se_cmd) @@ -1995,17 +2015,17 @@ int iscsi_target_register_configfs(void) * Setup default attribute lists for various fabric->tf_cit_tmpl * sturct config_item_type's */ - TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; + fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; ret = target_fabric_configfs_register(fabric); if (ret < 0) { diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 9a5721b8ff96..48f7b3bf4e8c 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -37,9 +37,6 @@ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0 #define NA_RANDOM_DATAIN_SEQ_OFFSETS 0 #define NA_RANDOM_R2T_OFFSETS 0 -#define NA_DEFAULT_ERL 0 -#define NA_DEFAULT_ERL_MAX 2 -#define NA_DEFAULT_ERL_MIN 0 /* struct iscsi_tpg_attrib sanity values */ #define TA_AUTHENTICATION 1 @@ -58,6 +55,8 @@ #define TA_DEMO_MODE_WRITE_PROTECT 1 /* Disabled by default in production mode w/ explict ACLs */ #define TA_PROD_MODE_WRITE_PROTECT 0 +#define TA_DEMO_MODE_DISCOVERY 1 +#define TA_DEFAULT_ERL 0 #define TA_CACHE_CORE_NPS 0 @@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table { CMDSN_NORMAL_OPERATION = 0, CMDSN_LOWER_THAN_EXP = 1, 
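Several hunks above (ISCSI_TPG_ATTRIB(), ISCSI_NODE_ATTRIB(), NODE_STAT_GRPS(), WWN_STAT_GRPS(), TF_CIT_TMPL()) drop pointer-returning accessor macros in favor of plain member access. A small sketch of the before/after shape, with invented names:

#include <stdio.h>

struct tpg_attrib {
    unsigned int default_cmdsn_depth;
};

struct portal_group {
    struct tpg_attrib tpg_attrib;
};

/* Old style: a macro hiding a trivial address-of. */
#define PG_ATTRIB(t)    (&(t)->tpg_attrib)

int main(void)
{
    struct portal_group pg = { .tpg_attrib = { .default_cmdsn_depth = 64 } };

    /* Equivalent reads; the series standardizes on the second form. */
    printf("%u\n", PG_ATTRIB(&pg)->default_cmdsn_depth);
    printf("%u\n", pg.tpg_attrib.default_cmdsn_depth);
    return 0;
}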
CMDSN_HIGHER_THAN_EXP = 2, + CMDSN_MAXCMDSN_OVERRUN = 3, }; /* Used for iscsi_handle_immediate_data() return values */ @@ -650,14 +650,13 @@ struct iscsi_session { /* Used for session reference counting */ int session_usage_count; int session_waiting_on_uc; - u32 cmd_pdus; - u32 rsp_pdus; - u64 tx_data_octets; - u64 rx_data_octets; - u32 conn_digest_errors; - u32 conn_timeout_errors; + atomic_long_t cmd_pdus; + atomic_long_t rsp_pdus; + atomic_long_t tx_data_octets; + atomic_long_t rx_data_octets; + atomic_long_t conn_digest_errors; + atomic_long_t conn_timeout_errors; u64 creation_time; - spinlock_t session_stats_lock; /* Number of active connections */ atomic_t nconn; atomic_t session_continuation; @@ -755,11 +754,6 @@ struct iscsi_node_acl { struct se_node_acl se_node_acl; }; -#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps) - -#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib) -#define ISCSI_NODE_AUTH(t) (&(t)->node_auth) - struct iscsi_tpg_attrib { u32 authentication; u32 login_timeout; @@ -769,6 +763,8 @@ struct iscsi_tpg_attrib { u32 default_cmdsn_depth; u32 demo_mode_write_protect; u32 prod_mode_write_protect; + u32 demo_mode_discovery; + u32 default_erl; struct iscsi_portal_group *tpg; }; @@ -835,12 +831,6 @@ struct iscsi_portal_group { struct list_head tpg_list; } ____cacheline_aligned; -#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg) -#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l]) -#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg) -#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib) -#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg) - struct iscsi_wwn_stat_grps { struct config_group iscsi_stat_group; struct config_group iscsi_instance_group; @@ -871,8 +861,6 @@ struct iscsi_tiqn { struct iscsi_logout_stats logout_stats; } ____cacheline_aligned; -#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps) - struct iscsit_global { /* In core shutdown */ u32 in_shutdown; diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c index 6c7a5104a4cd..7087c736daa5 100644 --- a/drivers/target/iscsi/iscsi_target_device.c +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess cmd->maxcmdsn_inc = 1; - if (!mutex_trylock(&sess->cmdsn_mutex)) { - sess->max_cmd_sn += 1; - pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); - return; - } + mutex_lock(&sess->cmdsn_mutex); sess->max_cmd_sn += 1; pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); mutex_unlock(&sess->cmdsn_mutex); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 41052e512d92..0d1e6ee3e992 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -757,7 +757,7 @@ int iscsit_check_post_dataout( static void iscsit_handle_time2retain_timeout(unsigned long data) { struct iscsi_session *sess = (struct iscsi_session *) data; - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; spin_lock_bh(&se_tpg->session_lock); @@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data) tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; - sess->conn_timeout_errors++; + atomic_long_inc(&sess->conn_timeout_errors); spin_unlock(&tiqn->sess_err_stats.lock); } } @@ -801,9 +801,9 
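The iscsi_target_device.c hunk above removes the mutex_trylock() fallback that bumped max_cmd_sn without holding cmdsn_mutex, taking the mutex unconditionally so the window update is always serialized with the rest of the CmdSN state. A pthread sketch of the resulting pattern (names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct session {
    pthread_mutex_t cmdsn_mutex;
    unsigned int max_cmd_sn;
};

/* Always take the mutex: the increment is ordered against every other
 * reader/writer of the CmdSN window, unlike a trylock fallback that
 * would update the counter unlocked. */
static void increment_maxcmdsn(struct session *sess)
{
    pthread_mutex_lock(&sess->cmdsn_mutex);
    sess->max_cmd_sn += 1;
    printf("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
    pthread_mutex_unlock(&sess->cmdsn_mutex);
}

int main(void)
{
    struct session sess = {
        .cmdsn_mutex = PTHREAD_MUTEX_INITIALIZER,
        .max_cmd_sn = 16,
    };

    increment_maxcmdsn(&sess);
    return 0;
}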
@@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) * Only start Time2Retain timer when the associated TPG is still in * an ACTIVE (eg: not disabled or shutdown) state. */ - spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); - tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); - spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); + spin_lock(&sess->tpg->tpg_state_lock); + tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&sess->tpg->tpg_state_lock); if (!tpg_active) return; @@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) */ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) { - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 1794c753954a..4eb93b2b6473 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1( } sess->creation_time = get_jiffies_64(); - spin_lock_init(&sess->session_stats_lock); /* * The FFP CmdSN window values will be allocated from the TPG's * Initiator Node's ACL once the login has been successfully completed. @@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2( * Assign a new TPG Session Handle. Note this is protected with * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). */ - sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; + sess->tsih = ++sess->tpg->ntsih; if (!sess->tsih) - sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; + sess->tsih = ++sess->tpg->ntsih; /* * Create the default params from user defined values.. */ if (iscsi_copy_param_list(&conn->param_list, - ISCSI_TPG_C(conn)->param_list, 1) < 0) { + conn->tpg->param_list, 1) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; @@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2( * In our case, we have already located the struct iscsi_tiqn at this point. */ memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); + sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2( iscsi_login_set_conn_values(sess, conn, pdu->cid); if (iscsi_copy_param_list(&conn->param_list, - ISCSI_TPG_C(conn)->param_list, 0) < 0) { + conn->tpg->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; @@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2( * In our case, we have already located the struct iscsi_tiqn at this point. 
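The login hunks above keep the existing TSIH allocation rule while switching to sess->tpg: a freshly assigned TSIH must never be zero (zero means "new session" in the Login PDU), so the per-TPG counter is bumped a second time when it wraps. A tiny sketch of that allocation rule (hypothetical names):

#include <stdio.h>

/* Allocate the next TPG session handle; 0 is reserved, so skip it on
 * wrap-around, mirroring the double increment in the hunk above. */
static unsigned short next_tsih(unsigned short *ntsih)
{
    unsigned short tsih = ++(*ntsih);

    if (!tsih)
        tsih = ++(*ntsih);
    return tsih;
}

int main(void)
{
    unsigned short counter = 0xfffe;

    printf("%hu\n", next_tsih(&counter));   /* 65535 */
    printf("%hu\n", next_tsih(&counter));   /* wraps past 0 -> 1 */
    return 0;
}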
*/ memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); + sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -691,7 +690,7 @@ int iscsi_post_login_handler( int stop_timer = 0; struct iscsi_session *sess = conn->sess; struct se_session *se_sess = sess->se_sess; - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; struct iscsi_thread_set *ts; @@ -1154,7 +1153,7 @@ old_sess_out: spin_lock_bh(&conn->sess->conn_lock); if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { struct se_portal_group *se_tpg = - &ISCSI_TPG_C(conn)->tpg_se_tpg; + &conn->tpg->tpg_se_tpg; atomic_set(&conn->sess->session_continuation, 0); spin_unlock_bh(&conn->sess->conn_lock); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index ef6d836a4d09..83c965c65386 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -88,7 +88,7 @@ int extract_param( if (len < 0) return -1; - if (len > max_length) { + if (len >= max_length) { pr_err("Length of input: %d exceeds max_length:" " %d\n", len, max_length); return -1; @@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication( iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl); - auth = ISCSI_NODE_AUTH(iscsi_nacl); + auth = &iscsi_nacl->node_auth; } } else { /* @@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero( return -1; if (!iscsi_check_negotiated_keys(conn->param_list)) { - if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && + if (conn->tpg->tpg_attrib.authentication && !strncmp(param->value, NONE, 4)) { pr_err("Initiator sent AuthMethod=None but" " Target is enforcing iSCSI Authentication," @@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero( return -1; } - if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && + if (conn->tpg->tpg_attrib.authentication && !login->auth_complete) return 0; @@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log } if (!login->auth_complete && - ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) { + conn->tpg->tpg_attrib.authentication) { pr_err("Initiator is requesting CSG: 1, has not been" " successfully authenticated, and the Target is" " enforcing iSCSI Authentication, login failed.\n"); diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c index 93bdc475eb00..16454a922e2b 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.c +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c @@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname( } void iscsit_set_default_node_attribues( - struct iscsi_node_acl *acl) + struct iscsi_node_acl *acl, + struct iscsi_portal_group *tpg) { struct iscsi_node_attrib *a = &acl->node_attrib; @@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues( a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; - a->default_erl = NA_DEFAULT_ERL; + a->default_erl = tpg->tpg_attrib.default_erl; } int iscsit_na_dataout_timeout( diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h index 
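The extract_param() fix above tightens the bound from len > max_length to len >= max_length, so the copied value always leaves room for its NUL terminator in a max_length-sized buffer. A minimal sketch of the corrected check (names are illustrative):

#include <stdio.h>
#include <string.h>

/* Copy value into out[max_length]; reject anything that would not fit
 * together with its terminating NUL, mirroring the >= check above. */
static int extract_value(char *out, int max_length, const char *value)
{
    int len = (int)strlen(value);

    if (len < 0 || len >= max_length)
        return -1;
    memcpy(out, value, len);
    out[len] = '\0';
    return len;
}

int main(void)
{
    char buf[8];

    printf("%d\n", extract_value(buf, sizeof(buf), "short"));     /* 5 */
    printf("%d\n", extract_value(buf, sizeof(buf), "too-long"));  /* 8 chars -> -1 */
    return 0;
}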
c970b326ef23..0c69a46a62ec 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.h +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h @@ -1,7 +1,8 @@ #ifndef ISCSI_TARGET_NODEATTRIB_H #define ISCSI_TARGET_NODEATTRIB_H -extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *); +extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *, + struct iscsi_portal_group *); extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32); extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32); diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index f788e8b5e855..103395510307 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->cmd_pdus)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rsp_pdus)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)sess->tx_data_octets); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->tx_data_octets)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)sess->rx_data_octets); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rx_data_octets)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", - sess->conn_digest_errors); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_digest_errors)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", - sess->conn_timeout_errors); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_timeout_errors)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 4faeb47fa5e1..39761837608d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS; a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT; a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; + a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; + a->default_erl = TA_DEFAULT_ERL; } int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -237,7 +239,7 @@ int 
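With the nodeattrib changes above, iscsit_set_default_node_attribues() takes the portal group and seeds the per-node default_erl from the new tpg_attrib.default_erl, which itself defaults to TA_DEFAULT_ERL in iscsit_set_default_tpg_attribs(). A compact sketch of that inheritance chain (simplified structs, not the kernel's):

#include <stdio.h>

#define TA_DEFAULT_ERL  0

struct tpg_attrib { unsigned int default_erl; };
struct portal_group { struct tpg_attrib tpg_attrib; };
struct node_attrib { unsigned int default_erl; };
struct node_acl { struct node_attrib node_attrib; };

static void set_default_tpg_attribs(struct portal_group *tpg)
{
    tpg->tpg_attrib.default_erl = TA_DEFAULT_ERL;
}

/* New node ACLs inherit ERL from the owning TPG instead of a global
 * NA_DEFAULT_ERL constant. */
static void set_default_node_attribs(struct node_acl *acl,
                                     const struct portal_group *tpg)
{
    acl->node_attrib.default_erl = tpg->tpg_attrib.default_erl;
}

int main(void)
{
    struct portal_group tpg;
    struct node_acl acl;

    set_default_tpg_attribs(&tpg);
    tpg.tpg_attrib.default_erl = 2;   /* admin raises the TPG default */
    set_default_node_attribs(&acl, &tpg);
    printf("node default_erl = %u\n", acl.node_attrib.default_erl);
    return 0;
}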
iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro if (iscsi_create_default_params(&tpg->param_list) < 0) goto err_out; - ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; + tpg->tpg_attrib.tpg = tpg; spin_lock(&tpg->tpg_state_lock); tpg->tpg_state = TPG_STATE_INACTIVE; @@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) return -EINVAL; } - if (ISCSI_TPG_ATTRIB(tpg)->authentication) { + if (tpg->tpg_attrib.authentication) { if (!strcmp(param->value, NONE)) { ret = iscsi_update_param_value(param, CHAP); if (ret) @@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect( return 0; } + +int iscsit_ta_demo_mode_discovery( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->demo_mode_discovery = flag; + pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:" + " %s\n", tpg->tpgt, (a->demo_mode_discovery) ? + "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_default_erl( + struct iscsi_portal_group *tpg, + u32 default_erl) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) { + pr_err("Illegal value for default_erl: %u\n", default_erl); + return -EINVAL; + } + + a->default_erl = default_erl; + pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index b77693e2c209..213c0fc7fdc9 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32); extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); +extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); +extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index b0cac0c342e1..0819e688a398 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm */ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { pr_err("Received CmdSN: 0x%08x is greater than" - " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, + " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, sess->max_cmd_sn); - ret = CMDSN_ERROR_CANNOT_RECOVER; + ret = CMDSN_MAXCMDSN_OVERRUN; } else if (cmdsn == sess->exp_cmd_sn) { sess->exp_cmd_sn++; @@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, ret = CMDSN_HIGHER_THAN_EXP; break; case CMDSN_LOWER_THAN_EXP: + case CMDSN_MAXCMDSN_OVERRUN: + default: cmd->i_state = ISTATE_REMOVE; iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); - ret = cmdsn_ret; - break; - default: - reason = ISCSI_REASON_PROTOCOL_ERROR; - reject = true; - ret = cmdsn_ret; + /* + * Existing callers for iscsit_sequence_cmd() will silently + * ignore commands with CMDSN_LOWER_THAN_EXP, so force this + * return for CMDSN_MAXCMDSN_OVERRUN as well.. 
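The new iscsit_ta_demo_mode_discovery() accepts only 0 or 1 and iscsit_ta_default_erl() only 0, 1 or 2, returning -EINVAL otherwise. A userspace sketch of the same validate-then-store shape (illustrative struct, errno-style returns):

#include <errno.h>
#include <stdio.h>

struct tpg_attrib {
    unsigned int demo_mode_discovery;
    unsigned int default_erl;
};

static int set_demo_mode_discovery(struct tpg_attrib *a, unsigned int flag)
{
    if (flag != 0 && flag != 1)
        return -EINVAL;
    a->demo_mode_discovery = flag;
    return 0;
}

static int set_default_erl(struct tpg_attrib *a, unsigned int erl)
{
    if (erl > 2)
        return -EINVAL;   /* only ERL 0, 1 and 2 are defined */
    a->default_erl = erl;
    return 0;
}

int main(void)
{
    struct tpg_attrib a = { .demo_mode_discovery = 1, .default_erl = 0 };

    printf("%d\n", set_default_erl(&a, 2));          /* 0 */
    printf("%d\n", set_default_erl(&a, 3));          /* -EINVAL */
    printf("%d\n", set_demo_mode_discovery(&a, 0));  /* 0 */
    return 0;
}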
+ */ + ret = CMDSN_LOWER_THAN_EXP; break; } mutex_unlock(&conn->sess->cmdsn_mutex); @@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data) tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; - conn->sess->conn_timeout_errors++; + atomic_long_inc(&conn->sess->conn_timeout_errors); spin_unlock_bh(&tiqn->sess_err_stats.lock); } } diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 0f6d69dabca1..1b41e6776152 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth( return sdev->queue_depth; } +static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag); + + if (tag) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag = 0; + + return tag; +} + /* * Locate the SAM Task Attr from struct scsi_cmnd * */ @@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work) set_host_byte(sc, DID_NO_CONNECT); goto out_done; } - + if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) { + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + goto out_done; + } tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) { scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" @@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) } tl_cmd->sc = sc; + tl_cmd->sc_cmd_tag = sc->tag; INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); queue_work(tcm_loop_workqueue, &tl_cmd->work); return 0; @@ -242,41 +261,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) * Called from SCSI EH process context to issue a LUN_RESET TMR * to struct scsi_device */ -static int tcm_loop_device_reset(struct scsi_cmnd *sc) +static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, + struct tcm_loop_nexus *tl_nexus, + int lun, int task, enum tcm_tmreq_table tmr) { struct se_cmd *se_cmd = NULL; - struct se_portal_group *se_tpg; struct se_session *se_sess; + struct se_portal_group *se_tpg; struct tcm_loop_cmd *tl_cmd = NULL; - struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tmr *tl_tmr = NULL; - struct tcm_loop_tpg *tl_tpg; - int ret = FAILED, rc; - /* - * Locate the tcm_loop_hba_t pointer - */ - tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); - return FAILED; - } - se_sess = tl_nexus->se_sess; - /* - * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id - */ - tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - se_tpg = &tl_tpg->tl_se_tpg; + int ret = TMR_FUNCTION_FAILED, rc; tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { pr_err("Unable to allocate memory for tl_cmd\n"); - return FAILED; + return ret; } tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); @@ -287,6 +286,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) init_waitqueue_head(&tl_tmr->tl_tmr_wait); se_cmd = &tl_cmd->tl_se_cmd; + se_tpg = &tl_tpg->tl_se_tpg; + se_sess = tl_nexus->se_sess; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ @@ -294,17 +295,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) DMA_NONE, 
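The iscsit_check_received_cmdsn()/iscsit_sequence_cmd() changes above classify a CmdSN beyond MaxCmdSN as CMDSN_MAXCMDSN_OVERRUN and silently drop it (same path as CMDSN_LOWER_THAN_EXP) instead of rejecting with a protocol error. A sketch of the wrap-safe serial-number comparison and the resulting classification; the comparison helper and its name are illustrative, not the kernel's iscsi_sna_gt():

#include <stdint.h>
#include <stdio.h>

enum cmdsn_ret {
    CMDSN_NORMAL_OPERATION = 0,
    CMDSN_LOWER_THAN_EXP = 1,
    CMDSN_HIGHER_THAN_EXP = 2,
    CMDSN_MAXCMDSN_OVERRUN = 3,
};

/* Wrap-safe "a > b" over 32-bit serial numbers. */
static int sna_gt(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;
}

static enum cmdsn_ret classify_cmdsn(uint32_t cmdsn, uint32_t exp, uint32_t max)
{
    if (sna_gt(cmdsn, max))
        return CMDSN_MAXCMDSN_OVERRUN;   /* dropped, no reject */
    if (cmdsn == exp)
        return CMDSN_NORMAL_OPERATION;
    if (sna_gt(cmdsn, exp))
        return CMDSN_HIGHER_THAN_EXP;
    return CMDSN_LOWER_THAN_EXP;         /* also silently ignored */
}

int main(void)
{
    printf("%d\n", classify_cmdsn(10, 10, 20));          /* 0 */
    printf("%d\n", classify_cmdsn(25, 10, 20));          /* 3 */
    printf("%d\n", classify_cmdsn(0xfffffffe, 2, 10));   /* 1: lower, wrapped */
    return 0;
}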
MSG_SIMPLE_TAG, &tl_cmd->tl_sense_buf[0]); - rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); + rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); if (rc < 0) goto release; + + if (tmr == TMR_ABORT_TASK) + se_cmd->se_tmr_req->ref_task_tag = task; + /* - * Locate the underlying TCM struct se_lun from sc->device->lun + * Locate the underlying TCM struct se_lun */ - if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) + if (transport_lookup_tmr_lun(se_cmd, lun) < 0) { + ret = TMR_LUN_DOES_NOT_EXIST; goto release; + } /* - * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() - * to wake us up. + * Queue the TMR to TCM Core and sleep waiting for + * tcm_loop_queue_tm_rsp() to wake us up. */ transport_generic_handle_tmr(se_cmd); wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); @@ -312,8 +319,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) * The TMR LUN_RESET has completed, check the response status and * then release allocations. */ - ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? - SUCCESS : FAILED; + ret = se_cmd->se_tmr_req->response; release: if (se_cmd) transport_generic_free_cmd(se_cmd, 1); @@ -323,6 +329,94 @@ release: return ret; } +static int tcm_loop_abort_task(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_hba->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + sc->tag, TMR_ABORT_TASK); + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; +} + +/* + * Called from SCSI EH process context to issue a LUN_RESET TMR + * to struct scsi_device + */ +static int tcm_loop_device_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_hba->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + 0, TMR_LUN_RESET); + return (ret == TMR_FUNCTION_COMPLETE) ? 
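With the refactor above, tcm_loop_device_reset() and the new tcm_loop_abort_task() both call a shared tcm_loop_issue_tmr() that returns the raw TMR response, and each SCSI EH entry point maps TMR_FUNCTION_COMPLETE to SUCCESS. A compact userspace sketch of that split; the stub types and the SUCCESS/FAILED values are illustrative, not the SCSI midlayer's:

#include <stdio.h>

enum tmr_type { TMR_ABORT_TASK, TMR_LUN_RESET };
enum tmr_resp { TMR_FUNCTION_COMPLETE, TMR_FUNCTION_FAILED };

#define SUCCESS 0
#define FAILED  1

/* Stand-in for tcm_loop_issue_tmr(): submit a TMR for (lun, task) and
 * return the raw TMR response code. */
static enum tmr_resp issue_tmr(int lun, int task_tag, enum tmr_type type)
{
    (void)lun; (void)task_tag;
    /* A real implementation would queue the TMR and wait for the
     * response; here LUN resets "succeed" and aborts "fail". */
    return type == TMR_LUN_RESET ? TMR_FUNCTION_COMPLETE
                                 : TMR_FUNCTION_FAILED;
}

static int eh_abort_task(int lun, int task_tag)
{
    return issue_tmr(lun, task_tag, TMR_ABORT_TASK) ==
           TMR_FUNCTION_COMPLETE ? SUCCESS : FAILED;
}

static int eh_device_reset(int lun)
{
    return issue_tmr(lun, 0, TMR_LUN_RESET) ==
           TMR_FUNCTION_COMPLETE ? SUCCESS : FAILED;
}

int main(void)
{
    printf("abort: %s\n", eh_abort_task(0, 42) == SUCCESS ? "SUCCESS" : "FAILED");
    printf("reset: %s\n", eh_device_reset(0) == SUCCESS ? "SUCCESS" : "FAILED");
    return 0;
}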
SUCCESS : FAILED; +} + +static int tcm_loop_target_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + if (!tl_hba) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + if (tl_tpg) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return SUCCESS; + } + return FAILED; +} + static int tcm_loop_slave_alloc(struct scsi_device *sd) { set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); @@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd) static int tcm_loop_slave_configure(struct scsi_device *sd) { + if (sd->tagged_supported) { + scsi_activate_tcq(sd, sd->queue_depth); + scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, + sd->host->cmd_per_lun); + } else { + scsi_adjust_queue_depth(sd, 0, + sd->host->cmd_per_lun); + } + return 0; } @@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = { .name = "TCM_Loopback", .queuecommand = tcm_loop_queuecommand, .change_queue_depth = tcm_loop_change_queue_depth, + .change_queue_type = tcm_loop_change_queue_type, + .eh_abort_handler = tcm_loop_abort_task, .eh_device_reset_handler = tcm_loop_device_reset, + .eh_target_reset_handler = tcm_loop_target_reset, .can_queue = 1024, .this_id = -1, .sg_tablesize = 256, @@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) { - return 1; + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + + return tl_cmd->sc_cmd_tag; } static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) @@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus( struct tcm_loop_nexus *tl_nexus; struct tcm_loop_hba *tl_hba = tpg->tl_hba; - tl_nexus = tpg->tl_hba->tl_nexus; + if (!tl_hba) + return -ENODEV; + + tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) return -ENODEV; @@ -1061,8 +1173,56 @@ check_newline: TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); +static ssize_t tcm_loop_tpg_show_transport_status( + struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + const char *status = NULL; + ssize_t ret = -EINVAL; + + switch (tl_tpg->tl_transport_status) { + case TCM_TRANSPORT_ONLINE: + status = "online"; + break; + case TCM_TRANSPORT_OFFLINE: + status = "offline"; + break; + default: + break; + } + + if (status) + ret = snprintf(page, PAGE_SIZE, "%s\n", status); + + return ret; +} + +static ssize_t tcm_loop_tpg_store_transport_status( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + + if (!strncmp(page, "online", 6)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return count; + } + if (!strncmp(page, "offline", 7)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE; + return count; + } + return -EINVAL; +} + +TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR); + static struct configfs_attribute *tcm_loop_tpg_attrs[] = { &tcm_loop_tpg_nexus.attr, + &tcm_loop_tpg_transport_status.attr, NULL, }; @@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void) /* * Setup default attribute lists for various 
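The new transport_status TPG attribute accepts "online"/"offline", and tcm_loop_submission_work() fails commands with DID_TRANSPORT_DISRUPTED while the TPG is offline. A sketch of the string-to-state mapping; the constants mirror the TCM_TRANSPORT_* values added to tcm_loop.h below:

#include <stdio.h>
#include <string.h>

#define TCM_TRANSPORT_ONLINE    0
#define TCM_TRANSPORT_OFFLINE   1

static const char *transport_status_show(int status)
{
    switch (status) {
    case TCM_TRANSPORT_ONLINE:
        return "online";
    case TCM_TRANSPORT_OFFLINE:
        return "offline";
    default:
        return NULL;
    }
}

/* Returns 0 on success, -1 for anything other than online/offline. */
static int transport_status_store(const char *page, int *status)
{
    if (!strncmp(page, "online", 6)) {
        *status = TCM_TRANSPORT_ONLINE;
        return 0;
    }
    if (!strncmp(page, "offline", 7)) {
        *status = TCM_TRANSPORT_OFFLINE;
        return 0;
    }
    return -1;
}

int main(void)
{
    int status = TCM_TRANSPORT_ONLINE;

    transport_status_store("offline\n", &status);
    printf("%s\n", transport_status_show(status));   /* offline */
    return 0;
}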
fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; /* * Once fabric->tf_ops has been setup, now register the fabric for * use within TCM diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index dd7a84ee78e1..54c59d0b6608 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -10,6 +10,8 @@ struct tcm_loop_cmd { /* State of Linux/SCSI CDB+Data descriptor */ u32 sc_cmd_state; + /* Tagged command queueing */ + u32 sc_cmd_tag; /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ struct scsi_cmnd *sc; /* The TCM I/O descriptor that is accessed via container_of() */ @@ -40,8 +42,12 @@ struct tcm_loop_nacl { struct se_node_acl se_node_acl; }; +#define TCM_TRANSPORT_ONLINE 0 +#define TCM_TRANSPORT_OFFLINE 1 + struct tcm_loop_tpg { unsigned short tl_tpgt; + unsigned short tl_transport_status; atomic_t tl_tpg_port_count; struct se_portal_group tl_se_tpg; struct tcm_loop_hba *tl_hba; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index e51b09a04d52..24884cac19ce 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; ret = target_fabric_configfs_register(fabric); if (ret < 0) { diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 47244102281e..fdcee326bfbc 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -44,7 +44,7 @@ static sense_reason_t core_alua_check_transition(int state, int *primary); static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, - struct se_port *port, int explict, int offline); + struct 
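The tcm_loop tag changes record sc->tag in the per-command descriptor at queuecommand time and return it from tcm_loop_get_task_tag() instead of the old constant 1, which is what lets the new eh_abort_handler reference a specific task. A minimal sketch of that plumbing (invented types standing in for struct scsi_cmnd and struct tcm_loop_cmd):

#include <stdio.h>

struct scsi_cmd { unsigned int tag; };
struct loop_cmd {
    unsigned int sc_cmd_tag;   /* captured at submit time */
    struct scsi_cmd *sc;
};

static void queuecommand(struct loop_cmd *lc, struct scsi_cmd *sc)
{
    lc->sc = sc;
    lc->sc_cmd_tag = sc->tag;
}

static unsigned int get_task_tag(const struct loop_cmd *lc)
{
    return lc->sc_cmd_tag;   /* previously hard-coded to 1 */
}

int main(void)
{
    struct scsi_cmd sc = { .tag = 37 };
    struct loop_cmd lc;

    queuecommand(&lc, &sc);
    printf("task tag = %u\n", get_task_tag(&lc));
    return 0;
}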
se_port *port, int explicit, int offline); static u16 alua_lu_gps_counter; static u32 alua_lu_gps_count; @@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set supported ASYMMETRIC ACCESS State bits */ - buf[off] = 0x80; /* T_SUP */ - buf[off] |= 0x40; /* O_SUP */ - buf[off] |= 0x8; /* U_SUP */ - buf[off] |= 0x4; /* S_SUP */ - buf[off] |= 0x2; /* AN_SUP */ - buf[off++] |= 0x1; /* AO_SUP */ + buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states; /* * TARGET PORT GROUP */ @@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) if (ext_hdr != 0) { buf[4] = 0x10; /* - * Set the implict transition time (in seconds) for the application + * Set the implicit transition time (in seconds) for the application * client to use as a base for it's transition timeout value. * * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN @@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) - buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; + buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); } } @@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) } /* - * SET_TARGET_PORT_GROUPS for explict ALUA operation. + * SET_TARGET_PORT_GROUPS for explicit ALUA operation. * * See spc4r17 section 6.35 */ @@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; /* - * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed + * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed * for the local tg_pt_gp. */ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; @@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) } spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); - if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { + if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { pr_debug("Unable to process SET_TARGET_PORT_GROUPS" - " while TPGS_EXPLICT_ALUA is disabled\n"); + " while TPGS_EXPLICIT_ALUA is disabled\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } @@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) spin_unlock(&dev->t10_alua.tg_pt_gps_lock); } else { /* - * Extact the RELATIVE TARGET PORT IDENTIFIER to identify + * Extract the RELATIVE TARGET PORT IDENTIFIER to identify * the Target Port in question for the the incoming * SET_TARGET_PORT_GROUPS op. 
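The REPORT TARGET PORT GROUPS change above fills the supported-states byte from tg_pt_gp_alua_supported_states rather than hard-coding T_SUP|O_SUP|U_SUP|S_SUP|AN_SUP|AO_SUP, so individual states can be masked per group. A sketch using the ALUA_*_SUP bit values introduced in target_core_alua.h later in this series:

#include <stdio.h>

/* Bit values of the spc4 SUPPORTED ASYMMETRIC ACCESS STATES byte, as
 * added to target_core_alua.h in this series. */
#define ALUA_T_SUP      0x80
#define ALUA_O_SUP      0x40
#define ALUA_LBD_SUP    0x10
#define ALUA_U_SUP      0x08
#define ALUA_S_SUP      0x04
#define ALUA_AN_SUP     0x02
#define ALUA_AO_SUP     0x01

int main(void)
{
    /* Default mask: everything except the logical-block-dependent state. */
    unsigned char supported = ALUA_T_SUP | ALUA_O_SUP | ALUA_U_SUP |
                              ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
    unsigned char buf[8] = { 0 };

    /* The RTPG descriptor now simply copies the configured mask. */
    buf[0] |= supported;

    printf("supported states byte = 0x%02x, standby %s\n", buf[0],
           (buf[0] & ALUA_S_SUP) ? "supported" : "masked");
    return 0;
}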
*/ @@ -487,7 +482,7 @@ static inline int core_alua_state_transition( u8 *alua_ascq) { /* - * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by + * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by * spc4r17 section 5.9.2.5 */ switch (cdb[0]) { @@ -515,9 +510,9 @@ static inline int core_alua_state_transition( } /* - * return 1: Is used to signal LUN not accecsable, and check condition/not ready + * return 1: Is used to signal LUN not accessible, and check condition/not ready * return 0: Used to signal success - * reutrn -1: Used to signal failure, and invalid cdb field + * return -1: Used to signal failure, and invalid cdb field */ sense_reason_t target_alua_state_check(struct se_cmd *cmd) @@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd) nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); /* - * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional + * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional * statement so the compiler knows explicitly to check this case first. * For the Optimized ALUA access state case, we want to process the * incoming fabric cmd ASAP.. */ - if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) + if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED) return 0; switch (out_alua_state) { @@ -620,13 +615,13 @@ out: } /* - * Check implict and explict ALUA state change request. + * Check implicit and explicit ALUA state change request. */ static sense_reason_t core_alua_check_transition(int state, int *primary) { switch (state) { - case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: case ALUA_ACCESS_STATE_STANDBY: case ALUA_ACCESS_STATE_UNAVAILABLE: @@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary) static char *core_alua_dump_state(int state) { switch (state) { - case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: return "Active/Optimized"; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: return "Active/NonOptimized"; @@ -676,10 +671,10 @@ char *core_alua_dump_status(int status) switch (status) { case ALUA_STATUS_NONE: return "None"; - case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: - return "Altered by Explict STPG"; - case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: - return "Altered by Implict ALUA"; + case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: + return "Altered by Explicit STPG"; + case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: + return "Altered by Implicit ALUA"; default: return "Unknown"; } @@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt( struct se_node_acl *nacl, unsigned char *md_buf, int new_state, - int explict) + int explicit) { struct se_dev_entry *se_deve; struct se_lun_acl *lacl; @@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt( old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); - tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? - ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; /* * Check for the optional ALUA primary state transition delay */ @@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt( * change, a device server shall establish a unit attention * condition for the initiator port associated with every I_T * nexus with the additional sense code set to ASYMMETRIC - * ACCESS STATE CHAGED. + * ACCESS STATE CHANGED. * * After an explicit target port asymmetric access state * change, a device server shall establish a unit attention @@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt( lacl = se_deve->se_lun_acl; /* * se_deve->se_lun_acl pointer may be NULL for a - * entry created without explict Node+MappedLUN ACLs + * entry created without explicit Node+MappedLUN ACLs */ if (!lacl) continue; - if (explict && + if (explicit && (nacl != NULL) && (nacl == lacl->se_lun_nacl) && (l_port != NULL) && (l_port == port)) continue; @@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt( atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " from primary access state %s to %s\n", (explict) ? "explict" : - "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + " from primary access state %s to %s\n", (explicit) ? "explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), core_alua_dump_state(new_state)); @@ -880,7 +875,7 @@ int core_alua_do_port_transition( struct se_port *l_port, struct se_node_acl *l_nacl, int new_state, - int explict) + int explicit) { struct se_device *dev; struct se_port *port; @@ -917,7 +912,7 @@ int core_alua_do_port_transition( * success. */ core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, - md_buf, new_state, explict); + md_buf, new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic_dec(); kfree(md_buf); @@ -946,7 +941,7 @@ int core_alua_do_port_transition( continue; /* * If the target behavior port asymmetric access state - * is changed for any target port group accessiable via + * is changed for any target port group accessible via * a logical unit within a LU group, the target port * behavior group asymmetric access states for the same * target port group accessible via other logical units @@ -970,7 +965,7 @@ int core_alua_do_port_transition( * success. */ core_alua_do_transition_tg_pt(tg_pt_gp, port, - nacl, md_buf, new_state, explict); + nacl, md_buf, new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); @@ -987,7 +982,7 @@ int core_alua_do_port_transition( pr_debug("Successfully processed LU Group: %s all ALUA TG PT" " Group IDs: %hu %s transition to primary state: %s\n", config_item_name(&lu_gp->lu_gp_group.cg_item), - l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", + l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit", core_alua_dump_state(new_state)); atomic_dec(&lu_gp->lu_gp_ref_cnt); @@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata( static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, - int explict, + int explicit, int offline) { struct t10_alua_tg_pt_gp *tg_pt_gp; @@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state( atomic_set(&port->sep_tg_pt_secondary_offline, 0); md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; - port->sep_tg_pt_secondary_stat = (explict) ? 
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + port->sep_tg_pt_secondary_stat = (explicit) ? + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " to secondary access state: %s\n", (explict) ? "explict" : - "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + " to secondary access state: %s\n", (explicit) ? "explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * struct se_device is released via core_alua_free_lu_gp_mem(). * * If the passed lu_gp does NOT match the default_lu_gp, assume - * we want to re-assocate a given lu_gp_mem with default_lu_gp. + * we want to re-associate a given lu_gp_mem with default_lu_gp. */ spin_lock(&lu_gp_mem->lu_gp_mem_lock); if (lu_gp != default_lu_gp) @@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, tg_pt_gp->tg_pt_gp_dev = dev; tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); /* - * Enable both explict and implict ALUA support by default + * Enable both explicit and implicit ALUA support by default */ tg_pt_gp->tg_pt_gp_alua_access_type = - TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; + TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA; /* * Set the default Active/NonOptimized Delay in milliseconds */ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; - tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; + tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS; + + /* + * Enable all supported states + */ + tg_pt_gp->tg_pt_gp_alua_supported_states = + ALUA_T_SUP | ALUA_O_SUP | + ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP; if (def_group) { spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp( * been called from target_core_alua_drop_tg_pt_gp(). * * Here we remove *tg_pt_gp from the global list so that - * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS + * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS * can be made while we are releasing struct t10_alua_tg_pt_gp. */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp( * core_alua_free_tg_pt_gp_mem(). * * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, - * assume we want to re-assocate a given tg_pt_gp_mem with + * assume we want to re-associate a given tg_pt_gp_mem with * default_tg_pt_gp. 
*/ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && - (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) - return sprintf(page, "Implict and Explict\n"); - else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) - return sprintf(page, "Implict\n"); - else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) - return sprintf(page, "Explict\n"); + if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && + (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) + return sprintf(page, "Implicit and Explicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) + return sprintf(page, "Implicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) + return sprintf(page, "Explicit\n"); else return sprintf(page, "None\n"); } @@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type( } if (tmp == 3) tg_pt_gp->tg_pt_gp_alua_access_type = - TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; + TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA; else if (tmp == 2) - tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA; else if (tmp == 1) - tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA; else tg_pt_gp->tg_pt_gp_alua_access_type = 0; @@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs( return count; } -ssize_t core_alua_show_implict_trans_secs( +ssize_t core_alua_show_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); } -ssize_t core_alua_store_implict_trans_secs( +ssize_t core_alua_store_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) @@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs( ret = kstrtoul(page, 0, &tmp); if (ret < 0) { - pr_err("Unable to extract implict_trans_secs\n"); + pr_err("Unable to extract implicit_trans_secs\n"); return ret; } - if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { - pr_err("Passed implict_trans_secs: %lu, exceeds" - " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, - ALUA_MAX_IMPLICT_TRANS_SECS); + if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) { + pr_err("Passed implicit_trans_secs: %lu, exceeds" + " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, + ALUA_MAX_IMPLICIT_TRANS_SECS); return -EINVAL; } - tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; + tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp; return count; } @@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status( return ret; } if ((tmp != ALUA_STATUS_NONE) && - (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && - (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { pr_err("Illegal value for alua_tg_pt_status: %lu\n", tmp); return -EINVAL; diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index e539c3e7f4ad..88e2e835f14a 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -7,15 +7,15 @@ * from spc4r17 section 6.4.2 Table 135 */ #define TPGS_NO_ALUA 0x00 -#define TPGS_IMPLICT_ALUA 0x10 -#define TPGS_EXPLICT_ALUA 0x20 +#define TPGS_IMPLICIT_ALUA 0x10 +#define TPGS_EXPLICIT_ALUA 0x20 /* 
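core_alua_store_access_type()/core_alua_show_access_type() above keep the 0-3 encoding while renaming the flags: 3 enables both implicit and explicit ALUA, 2 explicit only, 1 implicit only, 0 none. A sketch of that mapping, with values matching the TPGS_* defines in target_core_alua.h:

#include <stdio.h>

#define TPGS_NO_ALUA        0x00
#define TPGS_IMPLICIT_ALUA  0x10
#define TPGS_EXPLICIT_ALUA  0x20

static int access_type_store(unsigned long tmp)
{
    switch (tmp) {
    case 3: return TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
    case 2: return TPGS_EXPLICIT_ALUA;
    case 1: return TPGS_IMPLICIT_ALUA;
    case 0: return TPGS_NO_ALUA;
    default: return -1;   /* illegal value */
    }
}

static const char *access_type_show(int type)
{
    if ((type & TPGS_EXPLICIT_ALUA) && (type & TPGS_IMPLICIT_ALUA))
        return "Implicit and Explicit";
    if (type & TPGS_IMPLICIT_ALUA)
        return "Implicit";
    if (type & TPGS_EXPLICIT_ALUA)
        return "Explicit";
    return "None";
}

int main(void)
{
    printf("%s\n", access_type_show(access_type_store(3)));
    printf("%s\n", access_type_show(access_type_store(1)));
    return 0;
}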
* ASYMMETRIC ACCESS STATE field * * from spc4r17 section 6.27 Table 245 */ -#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0 +#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 #define ALUA_ACCESS_STATE_STANDBY 0x2 #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 @@ -23,13 +23,24 @@ #define ALUA_ACCESS_STATE_TRANSITION 0xf /* + * from spc4r36j section 6.37 Table 306 + */ +#define ALUA_T_SUP 0x80 +#define ALUA_O_SUP 0x40 +#define ALUA_LBD_SUP 0x10 +#define ALUA_U_SUP 0x08 +#define ALUA_S_SUP 0x04 +#define ALUA_AN_SUP 0x02 +#define ALUA_AO_SUP 0x01 + +/* * REPORT_TARGET_PORT_GROUP STATUS CODE * * from spc4r17 section 6.27 Table 246 */ #define ALUA_STATUS_NONE 0x00 -#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01 -#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02 +#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01 +#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02 /* * From spc4r17, Table D.1: ASC and ASCQ Assignement @@ -46,17 +57,17 @@ #define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 #define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ /* - * Used for implict and explict ALUA transitional delay, that is disabled + * Used for implicit and explicit ALUA transitional delay, that is disabled * by default, and is intended to be used for debugging client side ALUA code. */ #define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 #define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ /* - * Used for the recommended application client implict transition timeout + * Used for the recommended application client implicit transition timeout * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header. */ -#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 -#define ALUA_MAX_IMPLICT_TRANS_SECS 255 +#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0 +#define ALUA_MAX_IMPLICIT_TRANS_SECS 255 /* * Used by core_alua_update_tpg_primary_metadata() and * core_alua_update_tpg_secondary_metadata() @@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, const char *, size_t); -extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *, char *); -extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *, const char *, size_t); extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, char *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 82e81c542e43..272755d03e5a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric( * struct target_fabric_configfs *tf will contain a usage reference. 
*/ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_cit_tmpl.tfc_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; tf->tf_group.default_groups[0] = &tf->tf_disc_group; tf->tf_group.default_groups[1] = NULL; config_group_init_type_name(&tf->tf_group, name, - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_cit_tmpl.tfc_wwn_cit); config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", - &TF_CIT_TMPL(tf)->tfc_discovery_cit); + &tf->tf_cit_tmpl.tfc_discovery_cit); pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); @@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( int new_state, ret; if (!tg_pt_gp->tg_pt_gp_valid_id) { - pr_err("Unable to do implict ALUA on non valid" + pr_err("Unable to do implicit ALUA on non valid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } @@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( } new_state = (int)tmp; - if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { - pr_err("Unable to process implict configfs ALUA" - " transition while TPGS_IMPLICT_ALUA is disabled\n"); + if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { + pr_err("Unable to process implicit configfs ALUA" + " transition while TPGS_IMPLICIT_ALUA is disabled\n"); return -EINVAL; } @@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( new_status = (int)tmp; if ((new_status != ALUA_STATUS_NONE) && - (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && - (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { pr_err("Illegal ALUA access status: 0x%02x\n", new_status); return -EINVAL; @@ -2131,6 +2131,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); /* + * alua_supported_states + */ + +#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \ + struct t10_alua_tg_pt_gp *t, char *p) \ +{ \ + return sprintf(p, "%d\n", !!(t->_var & _bit)); \ +} + +#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \ +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\ + struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \ +{ \ + unsigned long tmp; \ + int ret; \ + \ + if (!t->tg_pt_gp_valid_id) { \ + pr_err("Unable to do set ##_name ALUA state on non" \ + " valid tg_pt_gp ID: %hu\n", \ + t->tg_pt_gp_valid_id); \ + return -EINVAL; \ + } \ + \ + ret = kstrtoul(p, 0, &tmp); \ + if (ret < 0) { \ + pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ + return -EINVAL; \ + } \ + if (tmp > 1) { \ + pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ + return -EINVAL; \ + } \ + if (!tmp) \ + t->_var |= _bit; \ + else \ + t->_var &= ~_bit; \ + \ + return c; \ +} + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning, + tg_pt_gp_alua_supported_states, ALUA_T_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning, + tg_pt_gp_alua_supported_states, ALUA_T_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline, + tg_pt_gp_alua_supported_states, ALUA_O_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(offline, + tg_pt_gp_alua_supported_states, ALUA_O_SUP); 
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent, + tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, + tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, + tg_pt_gp_alua_supported_states, ALUA_U_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable, + tg_pt_gp_alua_supported_states, ALUA_U_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby, + tg_pt_gp_alua_supported_states, ALUA_S_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(standby, + tg_pt_gp_alua_supported_states, ALUA_S_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized, + tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized, + tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized, + tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized, + tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR); + +/* * alua_write_metadata */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( @@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); /* - * implict_trans_secs + * implicit_trans_secs */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - return core_alua_show_implict_trans_secs(tg_pt_gp, page); + return core_alua_show_implicit_trans_secs(tg_pt_gp, page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { - return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); + return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count); } -SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); +SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR); /* * preferred @@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { &target_core_alua_tg_pt_gp_alua_access_state.attr, &target_core_alua_tg_pt_gp_alua_access_status.attr, &target_core_alua_tg_pt_gp_alua_access_type.attr, + &target_core_alua_tg_pt_gp_alua_support_transitioning.attr, + &target_core_alua_tg_pt_gp_alua_support_offline.attr, + &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr, + &target_core_alua_tg_pt_gp_alua_support_unavailable.attr, + &target_core_alua_tg_pt_gp_alua_support_standby.attr, + &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr, + &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr, &target_core_alua_tg_pt_gp_alua_write_metadata.attr, &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, - &target_core_alua_tg_pt_gp_implict_trans_secs.attr, + &target_core_alua_tg_pt_gp_implicit_trans_secs.attr, &target_core_alua_tg_pt_gp_preferred.attr, &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, 
&target_core_alua_tg_pt_gp_members.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index d90dbb0f1a69..207b340498a3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; } /* Directly associate cmd with se_dev */ se_cmd->se_dev = se_lun->lun_se_dev; - /* TODO: get rid of this and use atomics for stats */ dev = se_lun->lun_se_dev; - spin_lock_irqsave(&dev->stats_lock, flags); - dev->num_cmds++; + atomic_long_inc(&dev->num_cmds); if (se_cmd->data_direction == DMA_TO_DEVICE) - dev->write_bytes += se_cmd->data_length; + atomic_long_add(se_cmd->data_length, &dev->write_bytes); else if (se_cmd->data_direction == DMA_FROM_DEVICE) - dev->read_bytes += se_cmd->data_length; - spin_unlock_irqrestore(&dev->stats_lock, flags); - - spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); - list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); - spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); + atomic_long_add(se_cmd->data_length, &dev->read_bytes); return 0; } @@ -314,14 +313,14 @@ int core_enable_device_list_for_node( deve = nacl->device_list[mapped_lun]; /* - * Check if the call is handling demo mode -> explict LUN ACL + * Check if the call is handling demo mode -> explicit LUN ACL * transition. This transition must be for the same struct se_lun * + mapped_lun that was setup in demo mode.. 
*/ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (deve->se_lun_acl != NULL) { pr_err("struct se_dev_entry->se_lun_acl" - " already set for demo mode -> explict" + " already set for demo mode -> explicit" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; @@ -329,7 +328,7 @@ int core_enable_device_list_for_node( if (deve->se_lun != lun) { pr_err("struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" - " -> explict LUN ACL transition\n"); + " -> explicit LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; } @@ -1407,6 +1406,7 @@ static void scsi_dump_inquiry(struct se_device *dev) struct se_device *target_alloc_device(struct se_hba *hba, const char *name) { struct se_device *dev; + struct se_lun *xcopy_lun; dev = hba->transport->alloc_device(hba, name); if (!dev) @@ -1423,7 +1423,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); INIT_LIST_HEAD(&dev->g_dev_node); - spin_lock_init(&dev->stats_lock); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->dev_reservation_lock); @@ -1469,6 +1468,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; + xcopy_lun = &dev->xcopy_lun; + xcopy_lun->lun_se_dev = dev; + init_completion(&xcopy_lun->lun_shutdown_comp); + INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); + spin_lock_init(&xcopy_lun->lun_acl_lock); + spin_lock_init(&xcopy_lun->lun_sep_lock); + init_completion(&xcopy_lun->lun_ref_comp); + return dev; } diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 3503996d7d10..dae2ad6a669e 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun( } config_group_init_type_name(&lacl->se_lun_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); + &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit); config_group_init_type_name(&lacl->ml_stat_grps.stat_group, - "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); + "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit); lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; lacl_cg->default_groups[1] = NULL; @@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl( nacl_cg->default_groups[4] = NULL; config_group_init_type_name(&se_nacl->acl_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit); config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit); config_group_init_type_name(&se_nacl->acl_auth_group, "auth", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit); config_group_init_type_name(&se_nacl->acl_param_group, "param", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit); config_group_init_type_name(&se_nacl->acl_fabric_stat_group, "fabric_statistics", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit); return &se_nacl->acl_group; } @@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np( se_tpg_np->tpg_np_parent = se_tpg; 
config_group_init_type_name(&se_tpg_np->tpg_np_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_np_base_cit); return &se_tpg_np->tpg_np_group; } @@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun( } config_group_init_type_name(&lun->lun_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); + &tf->tf_cit_tmpl.tfc_tpg_port_cit); config_group_init_type_name(&lun->port_stat_grps.stat_group, - "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); + "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit); lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; lun_cg->default_groups[1] = NULL; @@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg( se_tpg->tpg_group.default_groups[6] = NULL; config_group_init_type_name(&se_tpg->tpg_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_base_cit); config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", - &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); + &tf->tf_cit_tmpl.tfc_tpg_lun_cit); config_group_init_type_name(&se_tpg->tpg_np_group, "np", - &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); + &tf->tf_cit_tmpl.tfc_tpg_np_cit); config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_cit); config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", - &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); + &tf->tf_cit_tmpl.tfc_tpg_attrib_cit); config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", - &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit); + &tf->tf_cit_tmpl.tfc_tpg_auth_cit); config_group_init_type_name(&se_tpg->tpg_param_group, "param", - &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); + &tf->tf_cit_tmpl.tfc_tpg_param_cit); return &se_tpg->tpg_group; } @@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn( wwn->wwn_group.default_groups[1] = NULL; config_group_init_type_name(&wwn->wwn_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_cit); + &tf->tf_cit_tmpl.tfc_tpg_cit); config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", - &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); + &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit); return &wwn->wwn_group; } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index b662f89dedac..0e34cda3271e 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } else { ret = fd_do_rw(cmd, sgl, sgl_nents, 1); /* - * Perform implict vfs_fsync_range() for fd_do_writev() ops + * Perform implicit vfs_fsync_range() for fd_do_writev() ops * for SCSI WRITEs with Forced Unit Access (FUA) set. * Allow this to happen independent of WCE=0 setting. 
*/ diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index b9a3394fe479..c87959f12760 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev) return iblock_emulate_read_cap_with_block_size(dev, bd, q); } +static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int ret; + + ret = bdev_alignment_offset(bd); + if (ret == -1) + return 0; + + /* convert offset-bytes to offset-lbas */ + return ret / bdev_logical_block_size(bd); +} + +static unsigned int iblock_get_lbppbe(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); + + return ilog2(logs_per_phys); +} + +static unsigned int iblock_get_io_min(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_min(bd); +} + +static unsigned int iblock_get_io_opt(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_opt(bd); +} + static struct sbc_ops iblock_sbc_ops = { .execute_rw = iblock_execute_rw, .execute_sync_cache = iblock_execute_sync_cache, @@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = { .show_configfs_dev_params = iblock_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = iblock_get_blocks, + .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, + .get_lbppbe = iblock_get_lbppbe, + .get_io_min = iblock_get_io_min, + .get_io_opt = iblock_get_io_opt, .get_write_cache = iblock_get_write_cache, }; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 579128abe3f5..47b63b094cdc 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev; struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, const char *); -struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, - unsigned char *); void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); @@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); -int transport_clear_lun_from_sessions(struct se_lun *); +int transport_clear_lun_ref(struct se_lun *); void transport_send_task_abort(struct se_cmd *); sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); void target_qf_do_work(struct work_struct *work); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d1ae4c5c3ffd..2f5d77932c80 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder( * statement. 
*/ if (!ret && !other_cdb) { - pr_debug("Allowing explict CDB: 0x%02x for %s" + pr_debug("Allowing explicit CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder( */ if (!registered_nexus) { - pr_debug("Allowing implict CDB: 0x%02x" + pr_debug("Allowing implicit CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder( * allow commands from registered nexuses. */ - pr_debug("Allowing implict CDB: 0x%02x for %s" + pr_debug("Allowing implicit CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( alua_port_list) { /* * This pointer will be NULL for demo mode MappedLUNs - * that have not been make explict via a ConfigFS + * that have not been make explicit via a ConfigFS * MappedLUN group for the SCSI Initiator Node ACL. */ if (!deve_tmp->se_lun_acl) @@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) smp_mb__after_atomic_dec(); } -static int core_scsi3_check_implict_release( +static int core_scsi3_check_implicit_release( struct se_device *dev, struct t10_pr_registration *pr_reg) { @@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release( } if (pr_res_holder == pr_reg) { /* - * Perform an implict RELEASE if the registration that + * Perform an implicit RELEASE if the registration that * is being released is holding the reservation. * * From spc4r17, section 5.7.11.1: @@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release( * For 'All Registrants' reservation types, all existing * registrations are still processed as reservation holders * in core_scsi3_pr_seq_non_holder() after the initial - * reservation holder is implictly released here. + * reservation holder is implicitly released here. */ } else if (pr_reg->pr_reg_all_tg_pt && (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, @@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, /* * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. */ - pr_holder = core_scsi3_check_implict_release( + pr_holder = core_scsi3_check_implicit_release( cmd->se_dev, pr_reg); if (pr_holder < 0) { ret = TCM_RESERVATION_CONFLICT; @@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release( struct se_device *dev, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, - int explict) + int explicit) { struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; char i_buf[PR_REG_ISID_ID_LEN]; @@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release( pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" " reservation holder TYPE: %s ALL_TG_PT: %d\n", - tfo->get_fabric_name(), (explict) ? "explict" : "implict", + tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", @@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt( memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* - * Do an implict RELEASE of the existing reservation. + * Do an implicit RELEASE of the existing reservation. 
*/ if (dev->dev_pr_res_holder) __core_scsi3_complete_pro_release(dev, nacl, @@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * 5.7.11.4 Preempting, Table 52 and Figure 7. * * For a ZERO SA Reservation key, release - * all other registrations and do an implict + * all other registrations and do an implicit * release of active persistent reservation. * * For a non-ZERO SA Reservation key, only diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 131327ac7f5b..4ffe5f2ec0e9 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@ #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> -#include <linux/blkdev.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <scsi/scsi.h> diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index d9b92b2c524d..52ae54e60105 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; buf[11] = dev->dev_attrib.block_size & 0xff; + + if (dev->transport->get_lbppbe) + buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; + + if (dev->transport->get_alignment_offset_lbas) { + u16 lalba = dev->transport->get_alignment_offset_lbas(dev); + buf[14] = (lalba >> 8) & 0x3f; + buf[15] = lalba & 0xff; + } + /* * Set Thin Provisioning Enable bit following sbc3r22 in section * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. */ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) - buf[14] = 0x80; + buf[14] |= 0x80; rbuf = transport_kmap_data_sg(cmd); if (rbuf) { diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 074539558a54..021c3f4a4f00 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) buf[5] = 0x80; /* - * Set TPGS field for explict and/or implict ALUA access type + * Set TPGS field for explicit and/or implicit ALUA access type * and opteration. * * See spc4r17 section 6.4.2 Table 135 @@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) struct se_device *dev = cmd->se_dev; u32 max_sectors; int have_tp = 0; + int opt, min; /* * Following spc3r22 section 6.5.3 Block Limits VPD page, when @@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set OPTIMAL TRANSFER LENGTH GRANULARITY */ - put_unaligned_be16(1, &buf[6]); + if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev))) + put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]); + else + put_unaligned_be16(1, &buf[6]); /* * Set MAXIMUM TRANSFER LENGTH @@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set OPTIMAL TRANSFER LENGTH */ - put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); + if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev))) + put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]); + else + put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); /* * Exit now if we don't support TP. @@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) *size = (cdb[3] << 8) + cdb[4]; /* - * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY. * See spc4r17 section 5.3 */ cmd->sam_task_attr = MSG_HEAD_TAG; @@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = spc_emulate_report_luns; *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* - * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS + * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ cmd->sam_task_attr = MSG_HEAD_TAG; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 9c642e02cba1..03538994d2f7 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@ #include <linux/utsname.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/blkdev.h> #include <linux/configfs.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); - return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_resets)); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); @@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuNumCommands */ - return snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)dev->num_cmds); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_cmds)); } DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); @@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuReadMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->read_bytes) >> 20); } DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); @@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuWrittenMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->write_bytes) >> 20); } DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); @@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuInResets */ - return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); + return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets)); } DEV_STAT_SCSI_LU_ATTR_RO(resets); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 250009909d49..70c638f730af 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -386,9 +386,7 @@ int core_tmr_lun_reset( pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); } - spin_lock_irq(&dev->stats_lock); - dev->num_resets++; - spin_unlock_irq(&dev->stats_lock); + atomic_long_inc(&dev->num_resets); pr_debug("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? 
"Preempt" : "TMR", diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index b9a6ec0aa5fe..f697f8baec54 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( return acl; } +EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); /* core_tpg_add_node_to_devs(): * @@ -633,6 +634,13 @@ int core_tpg_set_initiator_node_tag( } EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); +static void core_tpg_lun_ref_release(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_ref_comp); +} + static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) { /* Set in core_dev_setup_virtual_lun0() */ @@ -646,15 +654,20 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_acl_list); - INIT_LIST_HEAD(&lun->lun_cmd_list); spin_lock_init(&lun->lun_acl_lock); - spin_lock_init(&lun->lun_cmd_lock); spin_lock_init(&lun->lun_sep_lock); + init_completion(&lun->lun_ref_comp); - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); if (ret < 0) return ret; + ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + if (ret < 0) { + percpu_ref_cancel_init(&lun->lun_ref); + return ret; + } + return 0; } @@ -691,10 +704,9 @@ int core_tpg_register( atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_acl_list); - INIT_LIST_HEAD(&lun->lun_cmd_list); spin_lock_init(&lun->lun_acl_lock); - spin_lock_init(&lun->lun_cmd_lock); spin_lock_init(&lun->lun_sep_lock); + init_completion(&lun->lun_ref_comp); } se_tpg->se_tpg_type = se_tpg_type; @@ -815,10 +827,16 @@ int core_tpg_post_addlun( { int ret; - ret = core_dev_export(lun_ptr, tpg, lun); + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); if (ret < 0) return ret; + ret = core_dev_export(lun_ptr, tpg, lun); + if (ret < 0) { + percpu_ref_cancel_init(&lun->lun_ref); + return ret; + } + spin_lock(&tpg->tpg_lun_lock); lun->lun_access = lun_access; lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; @@ -827,14 +845,6 @@ int core_tpg_post_addlun( return 0; } -static void core_tpg_shutdown_lun( - struct se_portal_group *tpg, - struct se_lun *lun) -{ - core_clear_lun_from_tpg(lun, tpg); - transport_clear_lun_from_sessions(lun); -} - struct se_lun *core_tpg_pre_dellun( struct se_portal_group *tpg, u32 unpacked_lun) @@ -869,7 +879,8 @@ int core_tpg_post_dellun( struct se_portal_group *tpg, struct se_lun *lun) { - core_tpg_shutdown_lun(tpg, lun); + core_clear_lun_from_tpg(lun, tpg); + transport_clear_lun_ref(lun); core_dev_unexport(lun->lun_se_dev, tpg, lun); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 81e945eefbbd..91953da0f623 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -28,7 +28,6 @@ #include <linux/string.h> #include <linux/timer.h> #include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/in.h> @@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess) pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", se_tpg->se_tpg_tfo->get_fabric_name()); /* - * If last kref is dropping now for an explict NodeACL, awake sleeping + * If last kref is dropping now 
for an explicit NodeACL, awake sleeping * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context. */ @@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, if (write_pending) cmd->t_state = TRANSPORT_WRITE_PENDING; - /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - complete(&cmd->transport_lun_stop_comp); - return 1; - } - if (remove_from_lists) { target_remove_from_state_list(cmd); @@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - unsigned long flags; - if (!lun) + if (!lun || !cmd->lun_ref_active) return; - spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (!list_empty(&cmd->se_lun_node)) - list_del_init(&cmd->se_lun_node); - spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); + percpu_ref_put(&lun->lun_ref); } void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) @@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) cmd->transport_state |= CMD_T_FAILED; /* - * Check for case where an explict ABORT_TASK has been received + * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. */ if (cmd->transport_state & CMD_T_ABORTED && @@ -1092,13 +1070,10 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->state_list); - init_completion(&cmd->transport_lun_fe_stop_comp); - init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); @@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd) /* * If the received CDB has aleady been aborted stop processing it here. */ - if (transport_check_aborted_status(cmd, 1)) { - complete(&cmd->transport_lun_stop_comp); + if (transport_check_aborted_status(cmd, 1)) return; - } /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - spin_lock_irq(&cmd->t_state_lock); - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->transport_lun_stop_comp); - return; - } - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. */ + spin_lock_irq(&cmd->t_state_lock); if (cmd->transport_state & CMD_T_STOP) { pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", __func__, __LINE__, @@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) } EXPORT_SYMBOL(target_wait_for_sess_cmds); -/* transport_lun_wait_for_tasks(): - * - * Called from ConfigFS context to stop the passed struct se_cmd to allow - * an struct se_lun to be successfully shutdown. 
- */ -static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) -{ - unsigned long flags; - int ret = 0; - - /* - * If the frontend has already requested this struct se_cmd to - * be stopped, we can safely ignore this struct se_cmd. - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->transport_state & CMD_T_STOP) { - cmd->transport_state &= ~CMD_T_LUN_STOP; - - pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", - cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_cmd_check_stop(cmd, false, false); - return -EPERM; - } - cmd->transport_state |= CMD_T_LUN_FE_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - // XXX: audit task_flags checks. - spin_lock_irqsave(&cmd->t_state_lock, flags); - if ((cmd->transport_state & CMD_T_BUSY) && - (cmd->transport_state & CMD_T_SENT)) { - if (!target_stop_cmd(cmd, &flags)) - ret++; - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - pr_debug("ConfigFS: cmd: %p stop tasks ret:" - " %d\n", cmd, ret); - if (!ret) { - pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - wait_for_completion(&cmd->transport_lun_stop_comp); - pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - } - - return 0; -} - -static void __transport_clear_lun_from_sessions(struct se_lun *lun) -{ - struct se_cmd *cmd = NULL; - unsigned long lun_flags, cmd_flags; - /* - * Do exception processing and return CHECK_CONDITION status to the - * Initiator Port. - */ - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty(&lun->lun_cmd_list)) { - cmd = list_first_entry(&lun->lun_cmd_list, - struct se_cmd, se_lun_node); - list_del_init(&cmd->se_lun_node); - - spin_lock(&cmd->t_state_lock); - pr_debug("SE_LUN[%d] - Setting cmd->transport" - "_lun_stop for ITT: 0x%08x\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - cmd->transport_state |= CMD_T_LUN_STOP; - spin_unlock(&cmd->t_state_lock); - - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - - if (!cmd->se_lun) { - pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - /* - * If the Storage engine still owns the iscsi_cmd_t, determine - * and/or stop its context. - */ - pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - - if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - - pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" - "_wait_for_tasks(): SUCCESS\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - goto check_cond; - } - cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - - /* - * The Storage engine stopped this struct se_cmd before it was - * send to the fabric frontend for delivery back to the - * Initiator Node. Return this SCSI CDB back with an - * CHECK_CONDITION status. 
- */ -check_cond: - transport_send_check_condition_and_sense(cmd, - TCM_NON_EXISTENT_LUN, 0); - /* - * If the fabric frontend is waiting for this iscsi_cmd_t to - * be released, notify the waiting thread now that LU has - * finished accessing it. - */ - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (cmd->transport_state & CMD_T_LUN_FE_STOP) { - pr_debug("SE_LUN[%d] - Detected FE stop for" - " struct se_cmd: %p ITT: 0x%08x\n", - lun->unpacked_lun, - cmd, cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, - cmd_flags); - transport_cmd_check_stop(cmd, false, false); - complete(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - } - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); -} - -static int transport_clear_lun_thread(void *p) +static int transport_clear_lun_ref_thread(void *p) { struct se_lun *lun = p; - __transport_clear_lun_from_sessions(lun); + percpu_ref_kill(&lun->lun_ref); + + wait_for_completion(&lun->lun_ref_comp); complete(&lun->lun_shutdown_comp); return 0; } -int transport_clear_lun_from_sessions(struct se_lun *lun) +int transport_clear_lun_ref(struct se_lun *lun) { struct task_struct *kt; - kt = kthread_run(transport_clear_lun_thread, lun, + kt = kthread_run(transport_clear_lun_ref_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { pr_err("Unable to start clear_lun thread\n"); @@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); return false; } - /* - * If we are already stopped due to an external event (ie: LUN shutdown) - * sleep until the connection can have the passed struct se_cmd back. - * The cmd->transport_lun_stopped_sem will be upped by - * transport_clear_lun_from_sessions() once the ConfigFS context caller - * has completed its operation on the struct se_cmd. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("wait_for_tasks: Stopping" - " wait_for_completion(&cmd->t_tasktransport_lun_fe" - "_stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - /* - * There is a special case for WRITES where a FE exception + - * LUN shutdown means ConfigFS context is still sleeping on - * transport_lun_stop_comp in transport_lun_wait_for_tasks(). - * We go ahead and up transport_lun_stop_comp just to be sure - * here. - */ - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->transport_lun_stop_comp); - wait_for_completion(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&cmd->t_state_lock, flags); - - target_remove_from_state_list(cmd); - /* - * At this point, the frontend who was the originator of this - * struct se_cmd, now owns the structure and can be released through - * normal means below. 
- */ - pr_debug("wait_for_tasks: Stopped" - " wait_for_completion(&cmd->t_tasktransport_lun_fe_" - "stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_LUN_STOP; - } if (!(cmd->transport_state & CMD_T_ACTIVE)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; trace_target_cmd_complete(cmd); cmd->se_tfo->queue_status(cmd); @@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->se_tfo->write_pending_status(cmd) != 0) { cmd->transport_state |= CMD_T_ABORTED; smp_mb__after_atomic_inc(); + return; } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index 0204952fe4d3..be912b36daae 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h @@ -19,7 +19,7 @@ #define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 #define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 #define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 -#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 +#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 #define ASCQ_2AH_PRIORITY_CHANGED 0x08 #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 474cd44fac14..6b88a9958f61 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, struct xcopy_pt_cmd, se_cmd); - if (xpt_cmd->remote_port) - kfree(se_cmd->se_lun); - kfree(xpt_cmd); } @@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun( return 0; } - pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL); - if (!pt_cmd->se_lun) { - pr_err("Unable to allocate pt_cmd->se_lun\n"); - return -ENOMEM; - } - init_completion(&pt_cmd->se_lun->lun_shutdown_comp); - INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list); - INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list); - spin_lock_init(&pt_cmd->se_lun->lun_acl_lock); - spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock); - spin_lock_init(&pt_cmd->se_lun->lun_sep_lock); - + pt_cmd->se_lun = &se_dev->xcopy_lun; pt_cmd->se_dev = se_dev; pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); - pt_cmd->se_lun->lun_se_dev = se_dev; pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", @@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd( return 0; out: - if (remote_port == true) - kfree(cmd->se_lun); return ret; } diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 0dd54a44abcf..752863acecb8 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -22,6 +22,7 @@ #define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ #define FT_TPG_NAMELEN 32 /* max length of TPG name */ #define FT_LUN_NAMELEN 32 /* max length of LUN name */ +#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */ struct ft_transport_id { __u8 format; diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 0e5a1caed176..479ec5621a4e 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ 
b/drivers/target/tcm_fc/tfc_cmd.c @@ -28,6 +28,7 @@ #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/hash.h> +#include <linux/percpu_ida.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; + struct se_session *se_sess; if (!cmd) return; + se_sess = cmd->sess->se_sess; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); + percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); ft_sess_put(cmd->sess); /* undo get from lookup at recv */ - kfree(cmd); } void ft_release_cmd(struct se_cmd *se_cmd) @@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) { struct ft_cmd *cmd; struct fc_lport *lport = sess->tport->lport; + struct se_session *se_sess = sess->se_sess; + int tag; - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); - if (!cmd) + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + if (tag < 0) goto busy; + + cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct ft_cmd)); + + cmd->se_cmd.map_tag = tag; cmd->sess = sess; cmd->seq = lport->tt.seq_assign(lport, fp); if (!cmd->seq) { - kfree(cmd); + percpu_ida_free(&se_sess->sess_tag_pool, tag); goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 4e0050840a72..c6932fb53a8d 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -571,16 +571,16 @@ int ft_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = ft_nacl_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * register the fabric for use within TCM */ diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 4859505ae2ed..ae52c08dad09 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, if (!sess) return NULL; - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS, + sizeof(struct ft_cmd)); if (IS_ERR(sess->se_sess)) { kfree(sess); return NULL; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 03a567199bbe..f1d511a9475b 100644 --- a/drivers/thermal/thermal_core.c +++ 
b/drivers/thermal/thermal_core.c @@ -1608,15 +1608,17 @@ exit: EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); #ifdef CONFIG_NET +static const struct genl_multicast_group thermal_event_mcgrps[] = { + { .name = THERMAL_GENL_MCAST_GROUP_NAME, }, +}; + static struct genl_family thermal_event_genl_family = { .id = GENL_ID_GENERATE, .name = THERMAL_GENL_FAMILY_NAME, .version = THERMAL_GENL_VERSION, .maxattr = THERMAL_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group thermal_event_mcgrp = { - .name = THERMAL_GENL_MCAST_GROUP_NAME, + .mcgrps = thermal_event_mcgrps, + .n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps), }; int thermal_generate_netlink_event(struct thermal_zone_device *tz, @@ -1677,7 +1679,8 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, return result; } - result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); + result = genlmsg_multicast(&thermal_event_genl_family, skb, 0, + 0, GFP_ATOMIC); if (result) dev_err(&tz->device, "Failed to send netlink event:%d", result); @@ -1687,17 +1690,7 @@ EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); static int genetlink_init(void) { - int result; - - result = genl_register_family(&thermal_event_genl_family); - if (result) - return result; - - result = genl_register_mc_group(&thermal_event_genl_family, - &thermal_event_mcgrp); - if (result) - genl_unregister_family(&thermal_event_genl_family); - return result; + return genl_register_family(&thermal_event_genl_family); } static void genetlink_exit(void) diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 2b86f8e0fb58..71630a2af42c 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -1855,6 +1855,9 @@ static struct console sercons = { */ static int __init amiserial_console_init(void) { + if (!MACH_IS_AMIGA) + return -ENODEV; + register_console(&sercons); return 0; } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 7cdd1eb9406c..268b62768f2b 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -768,7 +768,7 @@ static size_t __process_echoes(struct tty_struct *tty) * data at the tail to prevent a subsequent overrun */ while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { - if (echo_buf(ldata, tail) == ECHO_OP_ERASE_TAB) + if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; else tail += 2; @@ -810,7 +810,8 @@ static void process_echoes(struct tty_struct *tty) struct n_tty_data *ldata = tty->disc_data; size_t echoed; - if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail) + if ((!L_ECHO(tty) && !L_ECHONL(tty)) || + ldata->echo_commit == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); @@ -825,7 +826,8 @@ static void flush_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head) + if ((!L_ECHO(tty) && !L_ECHONL(tty)) || + ldata->echo_commit == ldata->echo_head) return; mutex_lock(&ldata->output_lock); @@ -1998,7 +2000,10 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, found = 1; size = N_TTY_BUF_SIZE - tail; - n = (found + eol + size) & (N_TTY_BUF_SIZE - 1); + n = eol - tail; + if (n > 4096) + n += 4096; + n += found; c = n; if (found && read_buf(ldata, eol) == __DISABLED_CHAR) { @@ -2243,18 +2248,19 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, if (time) timeout = time; } - mutex_unlock(&ldata->atomic_read_lock); - remove_wait_queue(&tty->read_wait, &wait); + n_tty_set_room(tty); + 
up_read(&tty->termios_rwsem); + remove_wait_queue(&tty->read_wait, &wait); if (!waitqueue_active(&tty->read_wait)) ldata->minimum_to_wake = minimum; + mutex_unlock(&ldata->atomic_read_lock); + __set_current_state(TASK_RUNNING); if (b - buf) retval = b - buf; - n_tty_set_room(tty); - up_read(&tty->termios_rwsem); return retval; } diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index f3b306efaa59..23329918f229 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -41,7 +41,7 @@ config SERIAL_8250_DEPRECATED_OPTIONS accept kernel parameters in both forms like 8250_core.nr_uarts=4 and 8250.nr_uarts=4. We now renamed the module back to 8250, but if anybody noticed in 3.7 and changed their userspace we still have to - keep the 8350_core.* options around until they revert the changes + keep the 8250_core.* options around until they revert the changes they already did. If 8250 is built as a module, this adds 8250_core alias instead. diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 481b781b26e3..e9d420ff3931 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -2052,6 +2052,9 @@ static int __init pmz_console_init(void) /* Probe ports */ pmz_probe(); + if (pmz_ports_count == 0) + return -ENODEV; + /* TODO: Autoprobe console based on OF */ /* pmz_console.index = i; */ register_console(&pmz_console); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 537750261aaa..7d8103cd3e2e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work) desc = s->desc_rx[new]; if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { /* Handle incomplete DMA receive */ struct dma_chan *chan = s->chan_rx; struct shdma_desc *sh_desc = container_of(desc, diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 3a1a01af9a80..c74a00ad7add 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2086,6 +2086,7 @@ retry_open: filp->f_op = &tty_fops; goto retry_open; } + clear_bit(TTY_HUPPED, &tty->flags); tty_unlock(tty); diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 67beb8444930..f7beb6eb40c7 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -653,6 +653,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma) return -EINVAL; mem = idev->info->mem + mi; + if (mem->addr & ~PAGE_MASK) + return -ENODEV; if (vma->vm_end - vma->vm_start > mem->size) return -EINVAL; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3e7560f004f8..e8404319ca68 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1515,6 +1515,8 @@ static int acm_reset_resume(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ + .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 06cec635e703..bd9dc3504b51 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4832,8 +4832,9 @@ static void hub_events(void) hub->ports[i - 1]->child; dev_dbg(hub_dev, "warm reset port %d\n", i); - if (!udev || !(portstatus & - USB_PORT_STAT_CONNECTION)) { + if (!udev || + !(portstatus & 
USB_PORT_STAT_CONNECTION) || + udev->state == USB_STATE_NOTATTACHED) { status = hub_port_reset(hub, i, NULL, HUB_BH_RESET_TIME, true); @@ -5501,6 +5502,6 @@ acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev, if (!hub) return NULL; - return DEVICE_ACPI_HANDLE(&hub->ports[port1 - 1]->dev); + return ACPI_HANDLE(&hub->ports[port1 - 1]->dev); } #endif diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 255c14464bf2..4e243c37f17f 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -173,7 +173,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) } /* root hub's parent is the usb hcd. */ - parent_handle = DEVICE_ACPI_HANDLE(dev->parent); + parent_handle = ACPI_HANDLE(dev->parent); *handle = acpi_get_child(parent_handle, udev->portnum); if (!*handle) return -ENODEV; @@ -194,7 +194,7 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) raw_port_num = usb_hcd_find_raw_port_number(hcd, port_num); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), + *handle = acpi_get_child(ACPI_HANDLE(&udev->dev), raw_port_num); if (!*handle) return -ENODEV; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 95f7649c71a7..21a352079bc2 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -459,6 +459,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, dep = dwc3_wIndex_to_dep(dwc, wIndex); if (!dep) return -EINVAL; + if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) + break; ret = __dwc3_gadget_ep_set_halt(dep, set); if (ret) return -EINVAL; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 5452c0fce360..02e44fcaf205 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1200,9 +1200,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) else dep->flags |= DWC3_EP_STALL; } else { - if (dep->flags & DWC3_EP_WEDGE) - return 0; - ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, DWC3_DEPCMD_CLEARSTALL, &params); if (ret) @@ -1210,7 +1207,7 @@ value ? "set" : "clear", dep->name); else - dep->flags &= ~DWC3_EP_STALL; + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); } return ret; diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index a91e6422f930..f66d96ad1f51 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -682,6 +682,7 @@ config USB_CONFIGFS_PHONET config USB_CONFIGFS_MASS_STORAGE boolean "Mass storage" depends on USB_CONFIGFS + depends on BLOCK select USB_F_MASS_STORAGE help The Mass Storage Gadget acts as a USB Mass Storage disk drive.
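
The dwc3 hunks above move the ClearFeature(ENDPOINT_HALT) wedge check from __dwc3_gadget_ep_set_halt() into dwc3_ep0_handle_feature(), and drop DWC3_EP_WEDGE together with DWC3_EP_STALL when the function driver clears the stall. A minimal standalone sketch of that intended behaviour follows; the flag names and helpers here are simplified illustrations assumed for the example, not the in-tree driver code.

/* Illustrative only: simplified flags, not the dwc3 driver itself. */
#include <stdio.h>

#define EP_STALL (1u << 0)
#define EP_WEDGE (1u << 1)

struct ep {
        unsigned int flags;
};

/* Host-initiated ClearFeature(ENDPOINT_HALT): must not un-wedge. */
static void host_clear_halt(struct ep *ep)
{
        if (ep->flags & EP_WEDGE)
                return;                 /* stay halted while wedged */
        ep->flags &= ~EP_STALL;
}

/* Gadget-driver clear-halt: drops the stall and the wedge together. */
static void driver_clear_halt(struct ep *ep)
{
        ep->flags &= ~(EP_STALL | EP_WEDGE);
}

int main(void)
{
        struct ep ep = { .flags = EP_STALL | EP_WEDGE };

        host_clear_halt(&ep);           /* no effect: wedge wins */
        printf("after host ClearFeature: flags=%#x\n", ep.flags);

        driver_clear_halt(&ep);         /* endpoint fully cleared */
        printf("after driver clear_halt: flags=%#x\n", ep.flags);
        return 0;
}

The net effect shown here is that a host request can no longer silently un-wedge an endpoint; only the gadget driver's own clear-halt path does, and it resets both flags at once.
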
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 3e7ae707f691..2018ba1a2172 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -593,6 +593,7 @@ static void reset_config(struct usb_composite_dev *cdev) bitmap_zero(f->endpoints, 32); } cdev->config = NULL; + cdev->delayed_status = 0; } static int set_config(struct usb_composite_dev *cdev, diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 774e8b89cdb5..241fc873ffa4 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1304,7 +1304,7 @@ static struct ffs_data *ffs_data_new(void) { struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); if (unlikely(!ffs)) - return 0; + return NULL; ENTER(); diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index a03ba2c83589..b96393908860 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -523,7 +523,7 @@ static int fsg_setup(struct usb_function *f, */ DBG(fsg, "bulk reset request\n"); raise_exception(fsg->common, FSG_STATE_RESET); - return DELAYED_STATUS; + return USB_GADGET_DELAYED_STATUS; case US_BULK_GET_MAX_LUN: if (ctrl->bRequestType != @@ -602,13 +602,14 @@ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) return true; } -static int sleep_thread(struct fsg_common *common) +static int sleep_thread(struct fsg_common *common, bool can_freeze) { int rc = 0; /* Wait until a signal arrives or we are woken up */ for (;;) { - try_to_freeze(); + if (can_freeze) + try_to_freeze(); set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) { rc = -EINTR; @@ -682,7 +683,7 @@ static int do_read(struct fsg_common *common) /* Wait for the next buffer to become available */ bh = common->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(common); + rc = sleep_thread(common, false); if (rc) return rc; } @@ -937,7 +938,7 @@ static int do_write(struct fsg_common *common) } /* Wait for something to happen */ - rc = sleep_thread(common); + rc = sleep_thread(common, false); if (rc) return rc; } @@ -1504,7 +1505,7 @@ static int throw_away_data(struct fsg_common *common) } /* Otherwise wait for something to happen */ - rc = sleep_thread(common); + rc = sleep_thread(common, true); if (rc) return rc; } @@ -1625,7 +1626,7 @@ static int send_status(struct fsg_common *common) /* Wait for the next buffer to become available */ bh = common->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(common); + rc = sleep_thread(common, true); if (rc) return rc; } @@ -1828,7 +1829,7 @@ static int do_scsi_command(struct fsg_common *common) bh = common->next_buffhd_to_fill; common->next_buffhd_to_drain = bh; while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(common); + rc = sleep_thread(common, true); if (rc) return rc; } @@ -2174,7 +2175,7 @@ static int get_next_command(struct fsg_common *common) /* Wait for the next buffer to become available */ bh = common->next_buffhd_to_fill; while (bh->state != BUF_STATE_EMPTY) { - rc = sleep_thread(common); + rc = sleep_thread(common, true); if (rc) return rc; } @@ -2193,7 +2194,7 @@ static int get_next_command(struct fsg_common *common) /* Wait for the CBW to arrive */ while (bh->state != BUF_STATE_FULL) { - rc = sleep_thread(common); + rc = sleep_thread(common, true); if (rc) return rc; } @@ -2379,7 +2380,7 @@ static void handle_exception(struct fsg_common *common) } if (num_active == 0) break; - if 
(sleep_thread(common)) + if (sleep_thread(common, true)) return; } @@ -2516,7 +2517,7 @@ static int fsg_main_thread(void *common_) } if (!common->running) { - sleep_thread(common); + sleep_thread(common, true); continue; } @@ -3111,7 +3112,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) fsg->common->can_stall); if (ret) return ret; - fsg_common_set_inquiry_string(fsg->common, 0, 0); + fsg_common_set_inquiry_string(fsg->common, NULL, NULL); ret = fsg_common_run_thread(fsg->common); if (ret) return ret; diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index 0ac6064aa3b8..409a3c45a36a 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c @@ -54,6 +54,7 @@ */ #ifdef CONFIG_ARCH_PXA #include <mach/pxa25x-udc.h> +#include <mach/hardware.h> #endif #ifdef CONFIG_ARCH_LUBBOCK diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 9875d9c0823f..e20bc109fdd7 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -1180,6 +1180,7 @@ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg, } static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); +static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg); /** * s3c_hsotg_process_control - process a control request @@ -1221,6 +1222,7 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrl->bRequest) { case USB_REQ_SET_ADDRESS: + s3c_hsotg_disconnect(hsotg); dcfg = readl(hsotg->regs + DCFG); dcfg &= ~DCFG_DevAddr_MASK; dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT; @@ -1245,7 +1247,9 @@ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, /* as a fallback, try delivering it to the driver to deal with */ if (ret == 0 && hsotg->driver) { + spin_unlock(&hsotg->lock); ret = hsotg->driver->setup(&hsotg->gadget, ctrl); + spin_lock(&hsotg->lock); if (ret < 0) dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); } @@ -1308,10 +1312,12 @@ static void s3c_hsotg_complete_setup(struct usb_ep *ep, return; } + spin_lock(&hsotg->lock); if (req->actual == 0) s3c_hsotg_enqueue_setup(hsotg); else s3c_hsotg_process_control(hsotg, req->buf); + spin_unlock(&hsotg->lock); } /** @@ -2533,7 +2539,6 @@ irq_retry: writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS); call_gadget(hsotg, suspend); - s3c_hsotg_disconnect(hsotg); } if (gintsts & GINTSTS_WkUpInt) { diff --git a/drivers/usb/gadget/storage_common.h b/drivers/usb/gadget/storage_common.h index c74c2fdbd56e..70c891469f57 100644 --- a/drivers/usb/gadget/storage_common.h +++ b/drivers/usb/gadget/storage_common.h @@ -119,10 +119,6 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun) return curlun->filp != NULL; } -/* Big enough to hold our biggest descriptor */ -#define EP0_BUFSIZE 256 -#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ - /* Default size of buffer length. 
*/ #define FSG_BUFLEN ((u32)16384) diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index eccea1df702d..0f8aad78b54f 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c @@ -370,7 +370,7 @@ err: return -ENOMEM; } -void bot_cleanup_old_alt(struct f_uas *fu) +static void bot_cleanup_old_alt(struct f_uas *fu) { if (!(fu->flags & USBG_ENABLED)) return; @@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void) } fabric->tf_ops = usbg_ops; - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; ret = target_fabric_configfs_register(fabric); if (ret < 0) { printk(KERN_ERR "target_fabric_configfs_register() failed" diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c index 0dd07ae1555d..f49b0b61ecc8 100644 --- a/drivers/usb/gadget/zero.c +++ b/drivers/usb/gadget/zero.c @@ -91,17 +91,17 @@ static struct usb_zero_options gzero_options = { * functional coverage for the "USBCV" test harness from USB-IF. * It's always set if OTG mode is enabled. 
*/ -unsigned autoresume = DEFAULT_AUTORESUME; +static unsigned autoresume = DEFAULT_AUTORESUME; module_param(autoresume, uint, S_IRUGO); MODULE_PARM_DESC(autoresume, "zero, or seconds before remote wakeup"); /* Maximum Autoresume time */ -unsigned max_autoresume; +static unsigned max_autoresume; module_param(max_autoresume, uint, S_IRUGO); MODULE_PARM_DESC(max_autoresume, "maximum seconds before remote wakeup"); /* Interval between two remote wakeups */ -unsigned autoresume_interval_ms; +static unsigned autoresume_interval_ms; module_param(autoresume_interval_ms, uint, S_IRUGO); MODULE_PARM_DESC(autoresume_interval_ms, "milliseconds to increase successive wakeup delays"); diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index e89ac4d4b87e..9b7435f0dcd6 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -21,6 +21,7 @@ #include <linux/clk.h> #include <linux/device.h> +#include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1e2f3f495843..53c2e296467f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2973,8 +2973,58 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, } while (1) { - if (room_on_ring(xhci, ep_ring, num_trbs)) - break; + if (room_on_ring(xhci, ep_ring, num_trbs)) { + union xhci_trb *trb = ep_ring->enqueue; + unsigned int usable = ep_ring->enq_seg->trbs + + TRBS_PER_SEGMENT - 1 - trb; + u32 nop_cmd; + + /* + * Section 4.11.7.1 TD Fragments states that a link + * TRB must only occur at the boundary between + * data bursts (eg 512 bytes for 480M). + * While it is possible to split a large fragment + * we don't know the size yet. + * Simplest solution is to fill the trb before the + * LINK with nop commands. + */ + if (num_trbs == 1 || num_trbs <= usable || usable == 0) + break; + + if (ep_ring->type != TYPE_BULK) + /* + * While isoc transfers might have a buffer that + * crosses a 64k boundary it is unlikely. + * Since we can't add NOPs without generating + * gaps in the traffic just hope it never + * happens at the end of the ring. + * This could be fixed by writing a LINK TRB + * instead of the first NOP - however the + * TRB_TYPE_LINK_LE32() calls would all need + * changing to check the ring length. 
+ */ + break; + + if (num_trbs >= TRBS_PER_SEGMENT) { + xhci_err(xhci, "Too many fragments %d, max %d\n", + num_trbs, TRBS_PER_SEGMENT - 1); + return -ENOMEM; + } + + nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | + ep_ring->cycle_state); + ep_ring->num_trbs_free -= usable; + do { + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; + trb->generic.field[3] = nop_cmd; + trb++; + } while (--usable); + ep_ring->enqueue = trb; + if (room_on_ring(xhci, ep_ring, num_trbs)) + break; + } if (ep_ring == xhci->cmd_ring) { xhci_err(xhci, "Do not support expand command ring\n"); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 0a43329569d1..4d4499b80449 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1809,7 +1809,6 @@ static void musb_free(struct musb *musb) disable_irq_wake(musb->nIrq); free_irq(musb->nIrq, musb); } - cancel_work_sync(&musb->irq_work); musb_host_free(musb); } @@ -1896,6 +1895,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_platform_disable(musb); musb_generic_disable(musb); + /* Init IRQ workqueue before request_irq */ + INIT_WORK(&musb->irq_work, musb_irq_work); + /* setup musb parts of the core (especially endpoints) */ status = musb_core_init(plat->config->multipoint ? MUSB_CONTROLLER_MHDRC @@ -1905,9 +1907,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); - /* Init IRQ workqueue before request_irq */ - INIT_WORK(&musb->irq_work, musb_irq_work); - /* attach to the IRQ */ if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { dev_err(dev, "request_irq %d failed!\n", nIrq); @@ -1981,6 +1980,7 @@ fail4: musb_host_cleanup(musb); fail3: + cancel_work_sync(&musb->irq_work); if (musb->dma_controller) dma_controller_destroy(musb->dma_controller); fail2_5: @@ -2043,6 +2043,7 @@ static int musb_remove(struct platform_device *pdev) if (musb->dma_controller) dma_controller_destroy(musb->dma_controller); + cancel_work_sync(&musb->irq_work); musb_free(musb); device_init_wakeup(dev, 0); return 0; diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index ff9d6de2b746..a12bd30401e0 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -38,6 +38,7 @@ struct cppi41_dma_channel { u32 prog_len; u32 transferred; u32 packet_sz; + struct list_head tx_check; }; #define MUSB_DMA_NUM_CHANNELS 15 @@ -47,6 +48,8 @@ struct cppi41_dma_controller { struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; struct musb *musb; + struct hrtimer early_tx; + struct list_head early_tx_list; u32 rx_mode; u32 tx_mode; u32 auto_req; @@ -96,31 +99,27 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel) cppi41_channel->usb_toggle = toggle; } -static void cppi41_dma_callback(void *private_data) +static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep) { - struct dma_channel *channel = private_data; - struct cppi41_dma_channel *cppi41_channel = channel->private_data; - struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; - struct musb *musb = hw_ep->musb; - unsigned long flags; - struct dma_tx_state txstate; - u32 transferred; + u8 epnum = hw_ep->epnum; + struct musb *musb = hw_ep->musb; + void __iomem *epio = musb->endpoints[epnum].regs; + u16 csr; - spin_lock_irqsave(&musb->lock, flags); + csr = musb_readw(epio, MUSB_TXCSR); + if (csr & 
MUSB_TXCSR_TXPKTRDY) + return false; + return true; +} - dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie, - &txstate); - transferred = cppi41_channel->prog_len - txstate.residue; - cppi41_channel->transferred += transferred; +static void cppi41_dma_callback(void *private_data); - dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n", - hw_ep->epnum, cppi41_channel->transferred, - cppi41_channel->total_len); +static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel) +{ + struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; + struct musb *musb = hw_ep->musb; - update_rx_toggle(cppi41_channel); - - if (cppi41_channel->transferred == cppi41_channel->total_len || - transferred < cppi41_channel->packet_sz) { + if (!cppi41_channel->prog_len) { /* done, complete */ cppi41_channel->channel.actual_len = @@ -150,13 +149,11 @@ static void cppi41_dma_callback(void *private_data) remain_bytes, direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (WARN_ON(!dma_desc)) { - spin_unlock_irqrestore(&musb->lock, flags); + if (WARN_ON(!dma_desc)) return; - } dma_desc->callback = cppi41_dma_callback; - dma_desc->callback_param = channel; + dma_desc->callback_param = &cppi41_channel->channel; cppi41_channel->cookie = dma_desc->tx_submit(dma_desc); dma_async_issue_pending(dc); @@ -166,6 +163,117 @@ static void cppi41_dma_callback(void *private_data) musb_writew(epio, MUSB_RXCSR, csr); } } +} + +static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer) +{ + struct cppi41_dma_controller *controller; + struct cppi41_dma_channel *cppi41_channel, *n; + struct musb *musb; + unsigned long flags; + enum hrtimer_restart ret = HRTIMER_NORESTART; + + controller = container_of(timer, struct cppi41_dma_controller, + early_tx); + musb = controller->musb; + + spin_lock_irqsave(&musb->lock, flags); + list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list, + tx_check) { + bool empty; + struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; + + empty = musb_is_tx_fifo_empty(hw_ep); + if (empty) { + list_del_init(&cppi41_channel->tx_check); + cppi41_trans_done(cppi41_channel); + } + } + + if (!list_empty(&controller->early_tx_list)) { + ret = HRTIMER_RESTART; + hrtimer_forward_now(&controller->early_tx, + ktime_set(0, 150 * NSEC_PER_USEC)); + } + + spin_unlock_irqrestore(&musb->lock, flags); + return ret; +} + +static void cppi41_dma_callback(void *private_data) +{ + struct dma_channel *channel = private_data; + struct cppi41_dma_channel *cppi41_channel = channel->private_data; + struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; + struct musb *musb = hw_ep->musb; + unsigned long flags; + struct dma_tx_state txstate; + u32 transferred; + bool empty; + + spin_lock_irqsave(&musb->lock, flags); + + dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie, + &txstate); + transferred = cppi41_channel->prog_len - txstate.residue; + cppi41_channel->transferred += transferred; + + dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n", + hw_ep->epnum, cppi41_channel->transferred, + cppi41_channel->total_len); + + update_rx_toggle(cppi41_channel); + + if (cppi41_channel->transferred == cppi41_channel->total_len || + transferred < cppi41_channel->packet_sz) + cppi41_channel->prog_len = 0; + + empty = musb_is_tx_fifo_empty(hw_ep); + if (empty) { + cppi41_trans_done(cppi41_channel); + } else { + struct cppi41_dma_controller *controller; + /* + * On AM335x it has been observed that the TX interrupt fires + * too early that means the TXFIFO is not yet 
empty but the DMA + * engine says that it is done with the transfer. We don't + * receive a FIFO empty interrupt so the only thing we can do is + * to poll for the bit. On HS it usually takes 2us, on FS around + * 110us - 150us depending on the transfer size. + * We spin on HS (no longer than than 25us and setup a timer on + * FS to check for the bit and complete the transfer. + */ + controller = cppi41_channel->controller; + + if (musb->g.speed == USB_SPEED_HIGH) { + unsigned wait = 25; + + do { + empty = musb_is_tx_fifo_empty(hw_ep); + if (empty) + break; + wait--; + if (!wait) + break; + udelay(1); + } while (1); + + empty = musb_is_tx_fifo_empty(hw_ep); + if (empty) { + cppi41_trans_done(cppi41_channel); + goto out; + } + } + list_add_tail(&cppi41_channel->tx_check, + &controller->early_tx_list); + if (!hrtimer_active(&controller->early_tx)) { + hrtimer_start_range_ns(&controller->early_tx, + ktime_set(0, 140 * NSEC_PER_USEC), + 40 * NSEC_PER_USEC, + HRTIMER_MODE_REL); + } + } +out: spin_unlock_irqrestore(&musb->lock, flags); } @@ -364,6 +472,8 @@ static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket, WARN_ON(1); return 1; } + if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK) + return 0; if (cppi41_channel->is_tx) return 1; /* AM335x Advisory 1.0.13. No workaround for device RX mode */ @@ -388,6 +498,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel) if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE) return 0; + list_del_init(&cppi41_channel->tx_check); if (is_tx) { csr = musb_readw(epio, MUSB_TXCSR); csr &= ~MUSB_TXCSR_DMAENAB; @@ -495,6 +606,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller) cppi41_channel->controller = controller; cppi41_channel->port_num = port; cppi41_channel->is_tx = is_tx; + INIT_LIST_HEAD(&cppi41_channel->tx_check); musb_dma = &cppi41_channel->channel; musb_dma->private_data = cppi41_channel; @@ -520,6 +632,7 @@ void dma_controller_destroy(struct dma_controller *c) struct cppi41_dma_controller *controller = container_of(c, struct cppi41_dma_controller, controller); + hrtimer_cancel(&controller->early_tx); cppi41_dma_controller_stop(controller); kfree(controller); } @@ -539,6 +652,9 @@ struct dma_controller *dma_controller_create(struct musb *musb, if (!controller) goto kzalloc_fail; + hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + controller->early_tx.function = cppi41_recheck_tx_req; + INIT_LIST_HEAD(&controller->early_tx_list); controller->musb = musb; controller->controller.channel_alloc = cppi41_dma_channel_allocate; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index d2d3a173b315..32fb057c03f5 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1796,7 +1796,11 @@ int musb_gadget_setup(struct musb *musb) /* this "gadget" abstracts/virtualizes the controller */ musb->g.name = musb_driver_name; +#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE) musb->g.is_otg = 1; +#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) + musb->g.is_otg = 0; +#endif musb_g_init_endpoints(musb); diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 6370e50649d7..0e3c60cb669a 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c @@ -52,8 +52,7 @@ static int am335x_phy_probe(struct platform_device *pdev) return am_phy->id; } - ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, - USB_PHY_TYPE_USB2, 0, false); + ret = usb_phy_gen_create_phy(dev, 
&am_phy->usb_phy_gen, NULL); if (ret) return ret; @@ -66,8 +65,6 @@ static int am335x_phy_probe(struct platform_device *pdev) platform_set_drvdata(pdev, am_phy); return 0; - - return ret; } static int am335x_phy_remove(struct platform_device *pdev) diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index fce3a9e9bb5d..aa6d37b3378a 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -48,8 +48,9 @@ void usb_nop_xceiv_register(void) if (pd) return; pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0); - if (!pd) { + if (IS_ERR(pd)) { pr_err("Unable to register generic usb transceiver\n"); + pd = NULL; return; } } @@ -150,10 +151,40 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host) } int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, - enum usb_phy_type type, u32 clk_rate, bool needs_vcc) + struct usb_phy_gen_xceiv_platform_data *pdata) { + enum usb_phy_type type = USB_PHY_TYPE_USB2; int err; + u32 clk_rate = 0; + bool needs_vcc = false; + + nop->reset_active_low = true; /* default behaviour */ + + if (dev->of_node) { + struct device_node *node = dev->of_node; + enum of_gpio_flags flags = 0; + + if (of_property_read_u32(node, "clock-frequency", &clk_rate)) + clk_rate = 0; + + needs_vcc = of_property_read_bool(node, "vcc-supply"); + nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios", + 0, &flags); + if (nop->gpio_reset == -EPROBE_DEFER) + return -EPROBE_DEFER; + + nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; + + } else if (pdata) { + type = pdata->type; + clk_rate = pdata->clk_rate; + needs_vcc = pdata->needs_vcc; + nop->gpio_reset = pdata->gpio_reset; + } else { + nop->gpio_reset = -1; + } + nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg), GFP_KERNEL); if (!nop->phy.otg) @@ -218,43 +249,14 @@ EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy); static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct usb_phy_gen_xceiv_platform_data *pdata = - dev_get_platdata(&pdev->dev); struct usb_phy_gen_xceiv *nop; - enum usb_phy_type type = USB_PHY_TYPE_USB2; int err; - u32 clk_rate = 0; - bool needs_vcc = false; nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL); if (!nop) return -ENOMEM; - nop->reset_active_low = true; /* default behaviour */ - - if (dev->of_node) { - struct device_node *node = dev->of_node; - enum of_gpio_flags flags; - - if (of_property_read_u32(node, "clock-frequency", &clk_rate)) - clk_rate = 0; - - needs_vcc = of_property_read_bool(node, "vcc-supply"); - nop->gpio_reset = of_get_named_gpio_flags(node, "reset-gpios", - 0, &flags); - if (nop->gpio_reset == -EPROBE_DEFER) - return -EPROBE_DEFER; - - nop->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; - - } else if (pdata) { - type = pdata->type; - clk_rate = pdata->clk_rate; - needs_vcc = pdata->needs_vcc; - nop->gpio_reset = pdata->gpio_reset; - } - - err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc); + err = usb_phy_gen_create_phy(dev, nop, dev_get_platdata(&pdev->dev)); if (err) return err; @@ -271,8 +273,6 @@ static int usb_phy_gen_xceiv_probe(struct platform_device *pdev) platform_set_drvdata(pdev, nop); return 0; - - return err; } static int usb_phy_gen_xceiv_remove(struct platform_device *pdev) diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h index d2a220d81734..38a81f307b82 100644 --- a/drivers/usb/phy/phy-generic.h +++ b/drivers/usb/phy/phy-generic.h @@ -1,6 +1,8 @@ #ifndef _PHY_GENERIC_H_ #define 
_PHY_GENERIC_H_ +#include <linux/usb/usb_phy_gen_xceiv.h> + struct usb_phy_gen_xceiv { struct usb_phy phy; struct device *dev; @@ -14,6 +16,6 @@ int usb_gen_phy_init(struct usb_phy *phy); void usb_gen_phy_shutdown(struct usb_phy *phy); int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop, - enum usb_phy_type type, u32 clk_rate, bool needs_vcc); + struct usb_phy_gen_xceiv_platform_data *pdata); #endif diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index fdd33b44dbd3..545844b7e796 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -164,7 +164,7 @@ static int mxs_phy_probe(struct platform_device *pdev) mxs_phy->clk = clk; - platform_set_drvdata(pdev, &mxs_phy->phy); + platform_set_drvdata(pdev, mxs_phy); ret = usb_add_phy_dev(&mxs_phy->phy); if (ret) diff --git a/drivers/usb/phy/phy-rcar-gen2-usb.c b/drivers/usb/phy/phy-rcar-gen2-usb.c index a99a6953f11c..db3ab34cddb4 100644 --- a/drivers/usb/phy/phy-rcar-gen2-usb.c +++ b/drivers/usb/phy/phy-rcar-gen2-usb.c @@ -107,10 +107,10 @@ static void __rcar_gen2_usb_phy_init(struct rcar_gen2_usb_phy_priv *priv) clk_prepare_enable(priv->clk); /* Set USB channels in the USBHS UGCTRL2 register */ - val = ioread32(priv->base); + val = ioread32(priv->base + USBHS_UGCTRL2_REG); val &= ~(USBHS_UGCTRL2_USB0_HS | USBHS_UGCTRL2_USB2_SS); val |= priv->ugctrl2; - iowrite32(val, priv->base); + iowrite32(val, priv->base + USBHS_UGCTRL2_REG); } /* Shutdown USB channels */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9ced8937a8f3..fb0d537435eb 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -2123,6 +2123,20 @@ static void ftdi_set_termios(struct tty_struct *tty, termios->c_cflag |= CRTSCTS; } + /* + * All FTDI UART chips are limited to CS7/8. We won't pretend to + * support CS5/6 and revert the CSIZE setting instead. + */ + if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) { + dev_warn(ddev, "requested CSIZE setting not supported\n"); + + termios->c_cflag &= ~CSIZE; + if (old_termios) + termios->c_cflag |= old_termios->c_cflag & CSIZE; + else + termios->c_cflag |= CS8; + } + cflag = termios->c_cflag; if (!old_termios) @@ -2159,19 +2173,16 @@ no_skip: } else { urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; } - if (cflag & CSIZE) { - switch (cflag & CSIZE) { - case CS7: - urb_value |= 7; - dev_dbg(ddev, "Setting CS7\n"); - break; - case CS8: - urb_value |= 8; - dev_dbg(ddev, "Setting CS8\n"); - break; - default: - dev_err(ddev, "CSIZE was set but not CS7-CS8\n"); - } + switch (cflag & CSIZE) { + case CS7: + urb_value |= 7; + dev_dbg(ddev, "Setting CS7\n"); + break; + default: + case CS8: + urb_value |= 8; + dev_dbg(ddev, "Setting CS8\n"); + break; } /* This is needed by the break command since it uses the same command diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 2b01ec8651c2..b63ce023f96f 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -173,16 +173,8 @@ retry: clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); return result; } - /* - * Try sending off another urb, unless called from completion handler - * (in which case there will be no free urb or no data). 
- */ - if (mem_flags != GFP_ATOMIC) - goto retry; - clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); - - return 0; + goto retry; /* try sending off another urb */ } EXPORT_SYMBOL_GPL(usb_serial_generic_write_start); @@ -208,7 +200,7 @@ int usb_serial_generic_write(struct tty_struct *tty, return 0; count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); - result = usb_serial_generic_write_start(port, GFP_KERNEL); + result = usb_serial_generic_write_start(port, GFP_ATOMIC); if (result) return result; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index e5bdd987b9e8..a69da83604c0 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1813,25 +1813,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty, iflag = tty->termios.c_iflag; /* Change the number of bits */ - if (cflag & CSIZE) { - switch (cflag & CSIZE) { - case CS5: - lData = LCR_BITS_5; - break; + switch (cflag & CSIZE) { + case CS5: + lData = LCR_BITS_5; + break; - case CS6: - lData = LCR_BITS_6; - break; + case CS6: + lData = LCR_BITS_6; + break; - case CS7: - lData = LCR_BITS_7; - break; - default: - case CS8: - lData = LCR_BITS_8; - break; - } + case CS7: + lData = LCR_BITS_7; + break; + + default: + case CS8: + lData = LCR_BITS_8; + break; } + /* Change the Parity bit */ if (cflag & PARENB) { if (cflag & PARODD) { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c3d94853b4ab..496b7e39d5be 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_K4605 0x14C6 +#define HUAWEI_PRODUCT_E173S6 0x1C07 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -572,6 +573,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, @@ -634,6 +637,10 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, @@ -688,6 +695,10 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, { 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, @@ -742,6 +753,10 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) }, @@ -796,6 +811,10 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) }, @@ -850,6 +869,10 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) }, @@ -904,6 +927,10 @@ static const struct usb_device_id option_ids[] = { { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) }, { 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) }, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 1e6de4cd079d..1e3318dfa1cb 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -361,23 +361,21 @@ static void pl2303_set_termios(struct tty_struct *tty, 0, 0, buf, 7, 100); dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); - if (C_CSIZE(tty)) { - switch (C_CSIZE(tty)) { - case CS5: - buf[6] = 5; - break; - case CS6: - buf[6] = 6; - break; - case CS7: - buf[6] = 7; - break; - default: - case CS8: - buf[6] = 8; - } - dev_dbg(&port->dev, "data bits = %d\n", buf[6]); + switch (C_CSIZE(tty)) { + case CS5: + buf[6] = 5; + break; + case CS6: + buf[6] = 6; + break; + case CS7: + buf[6] = 7; + break; + default: + case CS8: + buf[6] = 8; } + dev_dbg(&port->dev, "data bits = %d\n", buf[6]); /* For reference buf[0]:buf[3] baud rate value */ pl2303_encode_baudrate(tty, port, &buf[0]); diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 4abac28b5992..5b793c352267 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -348,22 +348,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty, } /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ - if (cflag & CSIZE) { - switch (cflag & CSIZE) { - case CS5: - buf[1] |= SET_UART_FORMAT_SIZE_5; - break; - case CS6: - buf[1] |= SET_UART_FORMAT_SIZE_6; - break; - case CS7: - buf[1] |= SET_UART_FORMAT_SIZE_7; - break; - default: - case CS8: - buf[1] |= SET_UART_FORMAT_SIZE_8; - break; - } + switch (cflag & CSIZE) { + case CS5: + buf[1] |= SET_UART_FORMAT_SIZE_5; + break; + case CS6: + buf[1] |= SET_UART_FORMAT_SIZE_6; + break; + case CS7: + buf[1] |= SET_UART_FORMAT_SIZE_7; + break; + default: + case CS8: + buf[1] |= SET_UART_FORMAT_SIZE_8; + break; } /* Set Stop bit2 : 0:1bit 1:2bit */ diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index e538b72c4e3a..f14e7929ba22 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c @@ -97,18 +97,12 @@ static void wusbhc_devconnect_acked_work(struct work_struct *work); static void wusb_dev_free(struct wusb_dev *wusb_dev) { - if (wusb_dev) { - kfree(wusb_dev->set_gtk_req); - usb_free_urb(wusb_dev->set_gtk_urb); - kfree(wusb_dev); - } + kfree(wusb_dev); } static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) { struct wusb_dev *wusb_dev; - struct urb *urb; - struct usb_ctrlrequest *req; wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); if (wusb_dev == NULL) @@ -118,22 +112,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); - urb = usb_alloc_urb(0, GFP_KERNEL); - if (urb == NULL) - goto err; - wusb_dev->set_gtk_urb = urb; - - req = kmalloc(sizeof(*req), GFP_KERNEL); - if (req == NULL) - goto err; - wusb_dev->set_gtk_req = req; - - req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; - req->bRequest = USB_REQ_SET_DESCRIPTOR; - req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); - req->wIndex = 0; - req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); - return wusb_dev; err: wusb_dev_free(wusb_dev); @@ -411,9 +389,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, /* * Refresh the list of keep alives to emit in the MMC * - * Some devices don't 
respond to keep alives unless they've been - * authenticated, so skip unauthenticated devices. - * * We only publish the first four devices that have a coming timeout * condition. Then when we are done processing those, we go for the * next ones. We ignore the ones that have timed out already (they'll @@ -448,7 +423,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) if (wusb_dev == NULL) continue; - if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) + if (wusb_dev->usb_dev == NULL) continue; if (time_after(jiffies, wusb_dev->entry_ts + tt)) { @@ -524,11 +499,19 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) * * @wusbhc shall be referenced and unlocked */ -static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr) { + struct wusb_dev *wusb_dev; + mutex_lock(&wusbhc->mutex); - wusb_dev->entry_ts = jiffies; - __wusbhc_keep_alive(wusbhc); + wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); + if (wusb_dev == NULL) { + dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n", + srcaddr); + } else { + wusb_dev->entry_ts = jiffies; + __wusbhc_keep_alive(wusbhc); + } mutex_unlock(&wusbhc->mutex); } @@ -582,14 +565,22 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, * * @wusbhc shall be referenced and unlocked */ -static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr) { struct device *dev = wusbhc->dev; - - dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr); + struct wusb_dev *wusb_dev; mutex_lock(&wusbhc->mutex); - __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); + wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); + if (wusb_dev == NULL) { + dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n", + srcaddr); + } else { + dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", + wusb_dev->addr); + __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, + wusb_dev->port_idx)); + } mutex_unlock(&wusbhc->mutex); } @@ -611,30 +602,21 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, struct wusb_dn_hdr *dn_hdr, size_t size) { struct device *dev = wusbhc->dev; - struct wusb_dev *wusb_dev; if (size < sizeof(struct wusb_dn_hdr)) { dev_err(dev, "DN data shorter than DN header (%d < %d)\n", (int)size, (int)sizeof(struct wusb_dn_hdr)); return; } - - wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); - if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { - dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", - dn_hdr->bType, srcaddr); - return; - } - switch (dn_hdr->bType) { case WUSB_DN_CONNECT: wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); break; case WUSB_DN_ALIVE: - wusbhc_handle_dn_alive(wusbhc, wusb_dev); + wusbhc_handle_dn_alive(wusbhc, srcaddr); break; case WUSB_DN_DISCONNECT: - wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); + wusbhc_handle_dn_disconnect(wusbhc, srcaddr); break; case WUSB_DN_MASAVAILCHANGED: case WUSB_DN_RWAKE: diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index dd88441c8f78..4c40d0dbf53d 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c @@ -29,19 +29,16 @@ #include <linux/export.h> #include "wusbhc.h" -static void wusbhc_set_gtk_callback(struct urb *urb); -static void wusbhc_gtk_rekey_done_work(struct work_struct *work); 
+static void wusbhc_gtk_rekey_work(struct work_struct *work); int wusbhc_sec_create(struct wusbhc *wusbhc) { wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; wusbhc->gtk.descr.bReserved = 0; + wusbhc->gtk_index = 0; - wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, - WUSB_KEY_INDEX_ORIGINATOR_HOST); - - INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work); + INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work); return 0; } @@ -113,7 +110,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc) wusbhc_generate_gtk(wusbhc); result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, - &wusbhc->gtk.descr.bKeyData, key_size); + &wusbhc->gtk.descr.bKeyData, key_size); if (result < 0) dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", result); @@ -129,7 +126,7 @@ int wusbhc_sec_start(struct wusbhc *wusbhc) */ void wusbhc_sec_stop(struct wusbhc *wusbhc) { - cancel_work_sync(&wusbhc->gtk_rekey_done_work); + cancel_work_sync(&wusbhc->gtk_rekey_work); } @@ -185,12 +182,14 @@ static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value) static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { struct usb_device *usb_dev = wusb_dev->usb_dev; + u8 key_index = wusb_key_index(wusbhc->gtk_index, + WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST); return usb_control_msg( usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_DESCRIPTOR, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, - USB_DT_KEY << 8 | wusbhc->gtk_index, 0, + USB_DT_KEY << 8 | key_index, 0, &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, 1000); } @@ -520,24 +519,55 @@ error_kzalloc: * Once all connected and authenticated devices have received the new * GTK, switch the host to using it. */ -static void wusbhc_gtk_rekey_done_work(struct work_struct *work) +static void wusbhc_gtk_rekey_work(struct work_struct *work) { - struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); + struct wusbhc *wusbhc = container_of(work, + struct wusbhc, gtk_rekey_work); size_t key_size = sizeof(wusbhc->gtk.data); + int port_idx; + struct wusb_dev *wusb_dev, *wusb_dev_next; + LIST_HEAD(rekey_list); mutex_lock(&wusbhc->mutex); + /* generate the new key */ + wusbhc_generate_gtk(wusbhc); + /* roll the gtk index. */ + wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1); + /* + * Save all connected devices on a list while holding wusbhc->mutex and + * take a reference to each one. Then submit the set key request to + * them after releasing the lock in order to avoid a deadlock. + */ + for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) { + wusb_dev = wusbhc->port[port_idx].wusb_dev; + if (!wusb_dev || !wusb_dev->usb_dev + || !wusb_dev->usb_dev->authenticated) + continue; - if (--wusbhc->pending_set_gtks == 0) - wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); - + wusb_dev_get(wusb_dev); + list_add_tail(&wusb_dev->rekey_node, &rekey_list); + } mutex_unlock(&wusbhc->mutex); -} -static void wusbhc_set_gtk_callback(struct urb *urb) -{ - struct wusbhc *wusbhc = urb->context; + /* Submit the rekey requests without holding wusbhc->mutex. 
*/ + list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list, + rekey_node) { + list_del_init(&wusb_dev->rekey_node); + dev_dbg(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d\n", + __func__, wusb_dev->port_idx); + + if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) { + dev_err(&wusb_dev->usb_dev->dev, "%s: rekey device at port %d failed\n", + __func__, wusb_dev->port_idx); + } + wusb_dev_put(wusb_dev); + } - queue_work(wusbd, &wusbhc->gtk_rekey_done_work); + /* Switch the host controller to use the new GTK. */ + mutex_lock(&wusbhc->mutex); + wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, + &wusbhc->gtk.descr.bKeyData, key_size); + mutex_unlock(&wusbhc->mutex); } /** @@ -553,26 +583,12 @@ static void wusbhc_set_gtk_callback(struct urb *urb) */ void wusbhc_gtk_rekey(struct wusbhc *wusbhc) { - static const size_t key_size = sizeof(wusbhc->gtk.data); - int p; - - wusbhc_generate_gtk(wusbhc); - - for (p = 0; p < wusbhc->ports_max; p++) { - struct wusb_dev *wusb_dev; - - wusb_dev = wusbhc->port[p].wusb_dev; - if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated) - continue; - - usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev, - usb_sndctrlpipe(wusb_dev->usb_dev, 0), - (void *)wusb_dev->set_gtk_req, - &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, - wusbhc_set_gtk_callback, wusbhc); - if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0) - wusbhc->pending_set_gtks++; - } - if (wusbhc->pending_set_gtks == 0) - wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); + /* + * We need to submit a URB to the downstream WUSB devices in order to + * change the group key. This can't be done while holding the + * wusbhc->mutex since that is also taken in the urb_enqueue routine + * and will cause a deadlock. 
Instead, queue a work item to do + * it when the lock is not held + */ + queue_work(wusbd, &wusbhc->gtk_rekey_work); } diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h index 711b1952b114..6bd3b819a6b5 100644 --- a/drivers/usb/wusbcore/wusbhc.h +++ b/drivers/usb/wusbcore/wusbhc.h @@ -97,6 +97,7 @@ struct wusb_dev { struct kref refcnt; struct wusbhc *wusbhc; struct list_head cack_node; /* Connect-Ack list */ + struct list_head rekey_node; /* GTK rekey list */ u8 port_idx; u8 addr; u8 beacon_type:4; @@ -107,8 +108,6 @@ struct wusb_dev { struct usb_wireless_cap_descriptor *wusb_cap_descr; struct uwb_mas_bm availability; struct work_struct devconnect_acked_work; - struct urb *set_gtk_urb; - struct usb_ctrlrequest *set_gtk_req; struct usb_device *usb_dev; }; @@ -296,8 +295,7 @@ struct wusbhc { } __attribute__((packed)) gtk; u8 gtk_index; u32 gtk_tkid; - struct work_struct gtk_rekey_done_work; - int pending_set_gtks; + struct work_struct gtk_rekey_work; struct usb_encryption_descriptor *ccm1_etd; }; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e663921eebb6..f175629513ed 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -2168,15 +2168,15 @@ static int tcm_vhost_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the fabric for use within TCM */ diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 8521051cf946..cd961622f9c1 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -131,6 +131,7 @@ static const struct platform_device_id atmel_lcdfb_devtypes[] = { /* terminator */ } }; +MODULE_DEVICE_TABLE(platform, atmel_lcdfb_devtypes); static struct atmel_lcdfb_config * atmel_lcdfb_get_config(struct platform_device *pdev) diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c index 50c857477e4f..65041e15fd59 100644 --- a/drivers/video/kyro/fbdev.c +++ b/drivers/video/kyro/fbdev.c @@ -624,15 +624,15 @@ static int kyrofb_ioctl(struct fb_info *info, return -EINVAL; } case KYRO_IOCTL_UVSTRIDE: - if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long))) + if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride))) return -EFAULT; break; case KYRO_IOCTL_STRIDE: - if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long))) + if (copy_to_user(argp, 
&deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride))) return -EFAULT; break; case KYRO_IOCTL_OVERLAY_OFFSET: - if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long))) + if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset))) return -EFAULT; break; } diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 9dbea2223401..7d44d669d5b6 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -91,6 +91,15 @@ extern boot_infos_t *boot_infos; #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4 #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8 +#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp))) + +static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value) +{ + u32 bpp = info->var.bits_per_pixel; + + return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp); +} + /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the @@ -120,7 +129,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, mask <<= info->var.transp.offset; value |= mask; } - pal[regno] = value; + pal[regno] = offb_cmap_byteswap(info, value); return 0; } @@ -301,7 +310,7 @@ static struct fb_ops offb_ops = { static void __iomem *offb_map_reg(struct device_node *np, int index, unsigned long offset, unsigned long size) { - const u32 *addrp; + const __be32 *addrp; u64 asize, taddr; unsigned int flags; @@ -369,7 +378,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp } of_node_put(pciparent); } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) { - const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; +#ifdef __BIG_ENDIAN + const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 }; +#else + const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 }; +#endif u64 io_addr = of_translate_address(dp, io_of_addr); if (io_addr != OF_BAD_ADDR) { par->cmap_adr = ioremap(io_addr + 0x3c8, 2); @@ -535,7 +548,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) unsigned int flags, rsize, addr_prop = 0; unsigned long max_size = 0; u64 rstart, address = OF_BAD_ADDR; - const u32 *pp, *addrp, *up; + const __be32 *pp, *addrp, *up; u64 asize; int foreign_endian = 0; @@ -551,25 +564,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) if (pp == NULL) pp = of_get_property(dp, "depth", &len); if (pp && len == sizeof(u32)) - depth = *pp; + depth = be32_to_cpup(pp); pp = of_get_property(dp, "linux,bootx-width", &len); if (pp == NULL) pp = of_get_property(dp, "width", &len); if (pp && len == sizeof(u32)) - width = *pp; + width = be32_to_cpup(pp); pp = of_get_property(dp, "linux,bootx-height", &len); if (pp == NULL) pp = of_get_property(dp, "height", &len); if (pp && len == sizeof(u32)) - height = *pp; + height = be32_to_cpup(pp); pp = of_get_property(dp, "linux,bootx-linebytes", &len); if (pp == NULL) pp = of_get_property(dp, "linebytes", &len); if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) - pitch = *pp; + pitch = be32_to_cpup(pp); else pitch = width * ((depth + 7) / 8); diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c index e6d56f714ae4..d94f35dbd536 100644 --- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/omap2/displays-new/panel-sony-acx565akm.c @@ -526,6 +526,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) struct omap_dss_device *in = 
ddata->in; int r; + mutex_lock(&ddata->mutex); + dev_dbg(&ddata->spi->dev, "%s\n", __func__); in->ops.sdi->set_timings(in, &ddata->videomode); @@ -614,10 +616,7 @@ static int acx565akm_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - mutex_lock(&ddata->mutex); r = acx565akm_panel_power_on(dssdev); - mutex_unlock(&ddata->mutex); - if (r) return r; diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c index e0f098562a74..a297de5cc859 100644 --- a/drivers/video/sh_mobile_meram.c +++ b/drivers/video/sh_mobile_meram.c @@ -569,6 +569,7 @@ EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update); * Power management */ +#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME) static int sh_mobile_meram_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -611,6 +612,7 @@ static int sh_mobile_meram_resume(struct device *dev) meram_write_reg(priv->base, common_regs[i], priv->regs[i]); return 0; } +#endif /* CONFIG_PM_SLEEP || CONFIG_PM_RUNTIME */ static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops, sh_mobile_meram_suspend, diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c index b30e5a439d1f..a8f2b280f796 100644 --- a/drivers/video/vt8500lcdfb.c +++ b/drivers/video/vt8500lcdfb.c @@ -293,8 +293,7 @@ static int vt8500lcd_probe(struct platform_device *pdev) + sizeof(u32) * 16, GFP_KERNEL); if (!fbi) { dev_err(&pdev->dev, "Failed to initialize framebuffer device\n"); - ret = -ENOMEM; - goto failed; + return -ENOMEM; } strcpy(fbi->fb.fix.id, "VT8500 LCD"); @@ -327,15 +326,13 @@ static int vt8500lcd_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no I/O memory resource defined\n"); - ret = -ENODEV; - goto failed_fbi; + return -ENODEV; } res = request_mem_region(res->start, resource_size(res), "vt8500lcd"); if (res == NULL) { dev_err(&pdev->dev, "failed to request I/O memory\n"); - ret = -EBUSY; - goto failed_fbi; + return -EBUSY; } fbi->regbase = ioremap(res->start, resource_size(res)); @@ -346,17 +343,19 @@ static int vt8500lcd_probe(struct platform_device *pdev) } disp_timing = of_get_display_timings(pdev->dev.of_node); - if (!disp_timing) - return -EINVAL; + if (!disp_timing) { + ret = -EINVAL; + goto failed_free_io; + } ret = of_get_fb_videomode(pdev->dev.of_node, &of_mode, OF_USE_NATIVE_MODE); if (ret) - return ret; + goto failed_free_io; ret = of_property_read_u32(pdev->dev.of_node, "bits-per-pixel", &bpp); if (ret) - return ret; + goto failed_free_io; /* try allocating the framebuffer */ fb_mem_len = of_mode.xres * of_mode.yres * 2 * (bpp / 8); @@ -364,7 +363,8 @@ static int vt8500lcd_probe(struct platform_device *pdev) GFP_KERNEL); if (!fb_mem_virt) { pr_err("%s: Failed to allocate framebuffer\n", __func__); - return -ENOMEM; + ret = -ENOMEM; + goto failed_free_io; } fbi->fb.fix.smem_start = fb_mem_phys; @@ -447,9 +447,6 @@ failed_free_io: iounmap(fbi->regbase); failed_free_res: release_mem_region(res->start, resource_size(res)); -failed_fbi: - kfree(fbi); -failed: return ret; } diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index a6a2cebb2587..cafa973c43be 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -19,7 +19,6 @@ #include <linux/watchdog.h> #include <linux/platform_device.h> #include <linux/of_address.h> -#include <linux/miscdevice.h> #define PM_RSTC 0x1c #define PM_WDOG 0x24 diff --git a/drivers/watchdog/ep93xx_wdt.c 
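The vt8500lcdfb hunk above can drop its kfree()/failed labels because the private data is devm-allocated, and it routes the remaining early exits through goto failed_free_io so the ioremap and memory region are still released. A hedged sketch of that unwind shape, with invented demo_* names and a demo_hw_init() placeholder rather than the real driver:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {
	void __iomem *base;
};

static int demo_hw_init(struct demo_priv *priv)
{
	return 0;	/* placeholder for real hardware setup */
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* devm memory needs no kfree() */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;		/* nothing acquired yet, plain return is fine */

	if (!request_mem_region(res->start, resource_size(res), "demo"))
		return -EBUSY;

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		ret = -ENOMEM;
		goto err_release;
	}

	ret = demo_hw_init(priv);
	if (ret)
		goto err_unmap;		/* undo only what was acquired so far */

	platform_set_drvdata(pdev, priv);
	return 0;

err_unmap:
	iounmap(priv->base);
err_release:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

Keeping the labels in the reverse order of the acquisitions means each failure point only has to name the first resource that must be undone.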
b/drivers/watchdog/ep93xx_wdt.c index 833e81311848..d1d07f2f69df 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c @@ -28,7 +28,6 @@ #include <linux/platform_device.h> #include <linux/module.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/timer.h> #include <linux/io.h> diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c index 70a240297c6d..07f88f54e5c0 100644 --- a/drivers/watchdog/ie6xx_wdt.c +++ b/drivers/watchdog/ie6xx_wdt.c @@ -28,7 +28,6 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/watchdog.h> -#include <linux/miscdevice.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/uaccess.h> diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c index 2de486a7eea1..3aa50cfa335f 100644 --- a/drivers/watchdog/jz4740_wdt.c +++ b/drivers/watchdog/jz4740_wdt.c @@ -17,7 +17,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/platform_device.h> diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index a1a3638c579c..20dc73844737 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c @@ -26,7 +26,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/watchdog.h> #include <linux/platform_device.h> diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 6d4f3998e1f6..bdb3f4a5b27c 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -19,7 +19,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/bitops.h> diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 44edca66d564..f7722a424676 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -16,7 +16,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/miscdevice.h> #include <linux/platform_device.h> #include <linux/watchdog.h> #include <linux/init.h> diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 1bdcc313e1d9..5bec20f5dc2d 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -23,7 +23,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/platform_device.h> diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 53d37fea183e..d92c2d5859ce 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -16,7 +16,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/watchdog.h> -#include <linux/miscdevice.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index 3b9fff9dcf65..131193a7acdf 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c @@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void) #if defined CONFIG_PNP /* now that the user has specified an IO port and we haven't detected * any devices, disable pnp support */ + if (isapnp) + pnp_unregister_driver(&scl200wdt_pnp_driver); isapnp = 0; - 
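The run of watchdog hunks above simply drops #include <linux/miscdevice.h>: drivers converted to the watchdog core register a struct watchdog_device and let the framework create /dev/watchdog, so they no longer use the misc-device API themselves. A bare-bones sketch of such a core-based driver follows; the demo_* names are invented, and a real driver would poke hardware in start/stop:

#include <linux/module.h>
#include <linux/watchdog.h>

static int demo_wdt_start(struct watchdog_device *wdd)
{
	/* kick the hardware timer */
	return 0;
}

static int demo_wdt_stop(struct watchdog_device *wdd)
{
	/* disable the hardware timer */
	return 0;
}

static const struct watchdog_info demo_wdt_info = {
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "demo watchdog",
};

static const struct watchdog_ops demo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = demo_wdt_start,
	.stop  = demo_wdt_stop,
};

static struct watchdog_device demo_wdd = {
	.info    = &demo_wdt_info,
	.ops     = &demo_wdt_ops,
	.timeout = 30,
};

static int __init demo_wdt_init(void)
{
	/* The watchdog core creates the character device, so the driver
	 * never touches linux/miscdevice.h itself. */
	return watchdog_register_device(&demo_wdd);
}

static void __exit demo_wdt_exit(void)
{
	watchdog_unregister_device(&demo_wdd);
}

module_init(demo_wdt_init);
module_exit(demo_wdt_exit);
MODULE_LICENSE("GPL");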
pnp_unregister_driver(&scl200wdt_pnp_driver); #endif if (!request_region(io, io_len, SC1200_MODULE_NAME)) { diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index f9b8e06f3558..af3528f84d65 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c @@ -26,7 +26,6 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/spinlock.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/pm_runtime.h> #include <linux/fs.h> diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index ef2638fee4a8..c04a1aa158e2 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -42,7 +42,6 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/notifier.h> #include <linux/reboot.h> diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c index d667f6b51d35..bb64ae3f47da 100644 --- a/drivers/watchdog/stmp3xxx_rtc_wdt.c +++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/platform_device.h> #include <linux/stmp3xxx_rtc_wdt.h> diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c index 0fd0e8ae62a8..6a447e321dd0 100644 --- a/drivers/watchdog/txx9wdt.c +++ b/drivers/watchdog/txx9wdt.c @@ -13,7 +13,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> -#include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/init.h> #include <linux/platform_device.h> diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c index e029b5768f2c..5aed9d7ad47e 100644 --- a/drivers/watchdog/ux500_wdt.c +++ b/drivers/watchdog/ux500_wdt.c @@ -12,7 +12,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/moduleparam.h> -#include <linux/miscdevice.h> #include <linux/err.h> #include <linux/uaccess.h> #include <linux/watchdog.h> diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 62ccf5424ba8..028387192b60 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -930,9 +930,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, ret = m2p_add_override(mfn, pages[i], kmap_ops ? &kmap_ops[i] : NULL); if (ret) - return ret; + goto out; } + out: if (lazy) arch_leave_lazy_mmu_mode(); @@ -969,9 +970,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, ret = m2p_remove_override(pages[i], kmap_ops ? 
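The grant-table hunks above replace early "return ret" statements with "goto out" so that arch_leave_lazy_mmu_mode() still runs when an individual map or unmap fails. The sketch below shows the same single-exit shape with invented names: demo_enter_batch()/demo_leave_batch() stand in for the enter/leave pair (arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() in the real code) and demo_handle_one() for m2p_add_override()/m2p_remove_override().

#include <linux/errno.h>
#include <linux/types.h>

struct demo_item {
	int value;
};

static void demo_enter_batch(void) { }
static void demo_leave_batch(void) { }

static int demo_handle_one(struct demo_item *item)
{
	return item->value < 0 ? -EINVAL : 0;
}

static int demo_batch_op(struct demo_item *items, int count, bool lazy)
{
	int i, ret = 0;

	if (lazy)
		demo_enter_batch();

	for (i = 0; i < count; i++) {
		ret = demo_handle_one(&items[i]);
		if (ret)
			goto out;	/* never return with batch mode still active */
	}
out:
	if (lazy)
		demo_leave_batch();

	return ret;
}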
&kmap_ops[i] : NULL); if (ret) - return ret; + goto out; } + out: if (lazy) arch_leave_lazy_mmu_mode(); diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index d15f6e80479f..188825122aae 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -59,12 +59,12 @@ static int xen_add_device(struct device *dev) add.flags = XEN_PCI_DEV_EXTFN; #ifdef CONFIG_ACPI - handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); + handle = ACPI_HANDLE(&pci_dev->dev); if (!handle && pci_dev->bus->bridge) - handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); + handle = ACPI_HANDLE(pci_dev->bus->bridge); #ifdef CONFIG_PCI_IOV if (!handle && pci_dev->is_virtfn) - handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge); + handle = ACPI_HANDLE(physfn->bus->bridge); #endif if (handle) { acpi_status status; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a224bc74b6b9..1eac0731c349 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -555,6 +555,11 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, sg_dma_len(sgl) = 0; return 0; } + xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), + map & ~PAGE_MASK, + sg->length, + dir, + attrs); sg->dma_address = xen_phys_to_bus(map); } else { /* we are not interested in the dma_addr returned by |
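The xen/pci.c hunk above moves from the old DEVICE_ACPI_HANDLE() macro to ACPI_HANDLE(), which resolves the handle from a device's ACPI companion. A small sketch of that lookup, including the fall-back to the parent bridge that xen_add_device() uses; demo_pci_acpi_handle() is an invented helper, and the sketch assumes an ACPI-enabled kernel (the real code wraps this in #ifdef CONFIG_ACPI).

#include <linux/acpi.h>
#include <linux/pci.h>

static acpi_handle demo_pci_acpi_handle(struct pci_dev *pci_dev)
{
	/* NULL when the device has no ACPI companion (e.g. non-ACPI boots). */
	acpi_handle handle = ACPI_HANDLE(&pci_dev->dev);

	if (!handle && pci_dev->bus->bridge)
		handle = ACPI_HANDLE(pci_dev->bus->bridge);

	return handle;
}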