34 files changed, 1104 insertions, 676 deletions
diff --git a/Documentation/devicetree/bindings/extcon/extcon-arizona.txt b/Documentation/devicetree/bindings/extcon/extcon-arizona.txt index e1705fae63a8..e27341f8a4c7 100644 --- a/Documentation/devicetree/bindings/extcon/extcon-arizona.txt +++ b/Documentation/devicetree/bindings/extcon/extcon-arizona.txt @@ -13,3 +13,63 @@ Optional properties: ARIZONA_ACCDET_MODE_HPR or 2 - Headphone detect mode is set to HPDETR If this node is not mentioned or if the value is unknown, then headphone detection mode is set to HPDETL. + + - wlf,use-jd2 : Use the additional JD input along with JD1 for dual pin jack + detection. + - wlf,use-jd2-nopull : Internal pull on JD2 is disabled when used for + jack detection. + - wlf,jd-invert : Invert the polarity of the jack detection switch + + - wlf,micd-software-compare : Use a software comparison to determine mic + presence + - wlf,micd-detect-debounce : Additional software microphone detection + debounce specified in milliseconds. + - wlf,micd-pol-gpio : GPIO specifier for the GPIO controlling the headset + polarity if one exists. + - wlf,micd-bias-start-time : Time allowed for MICBIAS to startup prior to + performing microphone detection, specified as per the ARIZONA_MICD_TIME_XXX + defines. + - wlf,micd-rate : Delay between successive microphone detection measurements, + specified as per the ARIZONA_MICD_TIME_XXX defines. + - wlf,micd-dbtime : Microphone detection hardware debounces specified as the + number of measurements to take, valid values being 2 and 4. + - wlf,micd-timeout-ms : Timeout for microphone detection, specified in + milliseconds. + - wlf,micd-force-micbias : Force MICBIAS continuously on during microphone + detection. + - wlf,micd-configs : Headset polarity configurations (generally used for + detection of CTIA / OMTP headsets), the field can be of variable length + but should always be a multiple of 3 cells long, each three cell group + represents one polarity configuration. + The first cell defines the accessory detection pin, zero will use MICDET1 + and all other values will use MICDET2. + The second cell represents the MICBIAS to be used. + The third cell represents the value of the micd-pol-gpio pin. + + - wlf,gpsw : Settings for the general purpose switch + +Example: + +codec: wm8280@0 { + compatible = "wlf,wm8280"; + reg = <0>; + ... + + wlf,use-jd2; + wlf,use-jd2-nopull; + wlf,jd-invert; + + wlf,micd-software-compare; + wlf,micd-detect-debounce = <0>; + wlf,micd-pol-gpio = <&codec 2 0>; + wlf,micd-rate = <ARIZONA_MICD_TIME_8MS>; + wlf,micd-dbtime = <4>; + wlf,micd-timeout-ms = <100>; + wlf,micd-force-micbias; + wlf,micd-configs = < + 0 1 0 /* MICDET1 MICBIAS1 GPIO=low */ + 1 2 1 /* MICDET2 MICBIAS2 GPIO=high */ + >; + + wlf,gpsw = <0>; +}; diff --git a/Documentation/devicetree/bindings/extcon/extcon-max3355.txt b/Documentation/devicetree/bindings/extcon/extcon-max3355.txt new file mode 100644 index 000000000000..f2288ea9eb82 --- /dev/null +++ b/Documentation/devicetree/bindings/extcon/extcon-max3355.txt @@ -0,0 +1,21 @@ +Maxim Integrated MAX3355 USB OTG chip +------------------------------------- + +MAX3355 integrates a charge pump and comparators to enable a system with an +integrated USB OTG dual-role transceiver to function as a USB OTG dual-role +device. 
+ +Required properties: +- compatible: should be "maxim,max3355"; +- maxim,shdn-gpios: should contain a phandle and GPIO specifier for the GPIO pin + connected to the MAX3355's SHDN# pin; +- id-gpios: should contain a phandle and GPIO specifier for the GPIO pin + connected to the MAX3355's ID_OUT pin. + +Example: + + usb-otg { + compatible = "maxim,max3355"; + maxim,shdn-gpios = <&gpio2 4 GPIO_ACTIVE_LOW>; + id-gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>; + }; diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 0cebbf668886..3d89e60a3e71 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -52,6 +52,15 @@ config EXTCON_MAX14577 Maxim MAX14577/77836. The MAX14577/77836 MUIC is a USB port accessory detector and switch. +config EXTCON_MAX3355 + tristate "Maxim MAX3355 USB OTG EXTCON Support" + depends on GPIOLIB || COMPILE_TEST + help + If you say yes here you get support for the USB OTG role detection by + MAX3355. The MAX3355 chip integrates a charge pump and comparators to + enable a system with an integrated USB OTG dual-role transceiver to + function as an USB OTG dual-role device. + config EXTCON_MAX77693 tristate "Maxim MAX77693 EXTCON Support" depends on MFD_MAX77693 && INPUT diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index ba787d04295b..2a0e4f45d5b2 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o +obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index e4890dd4fefd..c121d01a5cd6 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c @@ -1201,10 +1201,58 @@ static void arizona_micd_set_level(struct arizona *arizona, int index, regmap_update_bits(arizona->regmap, reg, mask, level); } -static int arizona_extcon_device_get_pdata(struct arizona *arizona) +static int arizona_extcon_get_micd_configs(struct device *dev, + struct arizona *arizona) +{ + const char * const prop = "wlf,micd-configs"; + const int entries_per_config = 3; + struct arizona_micd_config *micd_configs; + int nconfs, ret; + int i, j; + u32 *vals; + + nconfs = device_property_read_u32_array(arizona->dev, prop, NULL, 0); + if (nconfs <= 0) + return 0; + + vals = kcalloc(nconfs, sizeof(u32), GFP_KERNEL); + if (!vals) + return -ENOMEM; + + ret = device_property_read_u32_array(arizona->dev, prop, vals, nconfs); + if (ret < 0) + goto out; + + nconfs /= entries_per_config; + + micd_configs = devm_kzalloc(dev, + nconfs * sizeof(struct arizona_micd_range), + GFP_KERNEL); + if (!micd_configs) { + ret = -ENOMEM; + goto out; + } + + for (i = 0, j = 0; i < nconfs; ++i) { + micd_configs[i].src = vals[j++] ? 
ARIZONA_ACCDET_SRC : 0; + micd_configs[i].bias = vals[j++]; + micd_configs[i].gpio = vals[j++]; + } + + arizona->pdata.micd_configs = micd_configs; + arizona->pdata.num_micd_configs = nconfs; + +out: + kfree(vals); + return ret; +} + +static int arizona_extcon_device_get_pdata(struct device *dev, + struct arizona *arizona) { struct arizona_pdata *pdata = &arizona->pdata; unsigned int val = ARIZONA_ACCDET_MODE_HPL; + int ret; device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val); switch (val) { @@ -1230,12 +1278,29 @@ static int arizona_extcon_device_get_pdata(struct arizona *arizona) device_property_read_u32(arizona->dev, "wlf,micd-dbtime", &pdata->micd_dbtime); - device_property_read_u32(arizona->dev, "wlf,micd-timeout", + device_property_read_u32(arizona->dev, "wlf,micd-timeout-ms", &pdata->micd_timeout); pdata->micd_force_micbias = device_property_read_bool(arizona->dev, "wlf,micd-force-micbias"); + pdata->micd_software_compare = device_property_read_bool(arizona->dev, + "wlf,micd-software-compare"); + + pdata->jd_invert = device_property_read_bool(arizona->dev, + "wlf,jd-invert"); + + device_property_read_u32(arizona->dev, "wlf,gpsw", &pdata->gpsw); + + pdata->jd_gpio5 = device_property_read_bool(arizona->dev, + "wlf,use-jd2"); + pdata->jd_gpio5_nopull = device_property_read_bool(arizona->dev, + "wlf,use-jd2-nopull"); + + ret = arizona_extcon_get_micd_configs(dev, arizona); + if (ret < 0) + dev_err(arizona->dev, "Failed to read micd configs: %d\n", ret); + return 0; } @@ -1257,7 +1322,7 @@ static int arizona_extcon_probe(struct platform_device *pdev) return -ENOMEM; if (!dev_get_platdata(arizona->dev)) - arizona_extcon_device_get_pdata(arizona); + arizona_extcon_device_get_pdata(&pdev->dev, arizona); info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD"); if (IS_ERR(info->micvdd)) { diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c index 601dbd996487..b30ab97ce75f 100644 --- a/drivers/extcon/extcon-max14577.c +++ b/drivers/extcon/extcon-max14577.c @@ -692,7 +692,7 @@ static int max14577_muic_probe(struct platform_device *pdev) /* Support irq domain for max14577 MUIC device */ for (i = 0; i < info->muic_irqs_num; i++) { struct max14577_muic_irq *muic_irq = &info->muic_irqs[i]; - unsigned int virq = 0; + int virq = 0; virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq); if (virq <= 0) diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c new file mode 100644 index 000000000000..c24abec5d06c --- /dev/null +++ b/drivers/extcon/extcon-max3355.c @@ -0,0 +1,146 @@ +/* + * Maxim Integrated MAX3355 USB OTG chip extcon driver + * + * Copyright (C) 2014-2015 Cogent Embedded, Inc. + * Author: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ */ + +#include <linux/extcon.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +struct max3355_data { + struct extcon_dev *edev; + struct gpio_desc *id_gpiod; + struct gpio_desc *shdn_gpiod; +}; + +static const unsigned int max3355_cable[] = { + EXTCON_USB, + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +static irqreturn_t max3355_id_irq(int irq, void *dev_id) +{ + struct max3355_data *data = dev_id; + int id = gpiod_get_value_cansleep(data->id_gpiod); + + if (id) { + /* + * ID = 1 means USB HOST cable detached. + * As we don't have event for USB peripheral cable attached, + * we simulate USB peripheral attach here. + */ + extcon_set_cable_state_(data->edev, EXTCON_USB_HOST, false); + extcon_set_cable_state_(data->edev, EXTCON_USB, true); + } else { + /* + * ID = 0 means USB HOST cable attached. + * As we don't have event for USB peripheral cable detached, + * we simulate USB peripheral detach here. + */ + extcon_set_cable_state_(data->edev, EXTCON_USB, false); + extcon_set_cable_state_(data->edev, EXTCON_USB_HOST, true); + } + + return IRQ_HANDLED; +} + +static int max3355_probe(struct platform_device *pdev) +{ + struct max3355_data *data; + struct gpio_desc *gpiod; + int irq, err; + + data = devm_kzalloc(&pdev->dev, sizeof(struct max3355_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN); + if (IS_ERR(gpiod)) { + dev_err(&pdev->dev, "failed to get ID_OUT GPIO\n"); + return PTR_ERR(gpiod); + } + data->id_gpiod = gpiod; + + gpiod = devm_gpiod_get(&pdev->dev, "maxim,shdn", GPIOD_OUT_HIGH); + if (IS_ERR(gpiod)) { + dev_err(&pdev->dev, "failed to get SHDN# GPIO\n"); + return PTR_ERR(gpiod); + } + data->shdn_gpiod = gpiod; + + data->edev = devm_extcon_dev_allocate(&pdev->dev, max3355_cable); + if (IS_ERR(data->edev)) { + dev_err(&pdev->dev, "failed to allocate extcon device\n"); + return PTR_ERR(data->edev); + } + + err = devm_extcon_dev_register(&pdev->dev, data->edev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register extcon device\n"); + return err; + } + + irq = gpiod_to_irq(data->id_gpiod); + if (irq < 0) { + dev_err(&pdev->dev, "failed to translate ID_OUT GPIO to IRQ\n"); + return irq; + } + + err = devm_request_threaded_irq(&pdev->dev, irq, NULL, max3355_id_irq, + IRQF_ONESHOT | IRQF_NO_SUSPEND | + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + pdev->name, data); + if (err < 0) { + dev_err(&pdev->dev, "failed to request ID_OUT IRQ\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + /* Perform initial detection */ + max3355_id_irq(irq, data); + + return 0; +} + +static int max3355_remove(struct platform_device *pdev) +{ + struct max3355_data *data = platform_get_drvdata(pdev); + + gpiod_set_value_cansleep(data->shdn_gpiod, 0); + + return 0; +} + +static const struct of_device_id max3355_match_table[] = { + { .compatible = "maxim,max3355", }, + { } +}; +MODULE_DEVICE_TABLE(of, max3355_match_table); + +static struct platform_driver max3355_driver = { + .probe = max3355_probe, + .remove = max3355_remove, + .driver = { + .name = "extcon-max3355", + .of_match_table = max3355_match_table, + }, +}; + +module_platform_driver(max3355_driver); + +MODULE_AUTHOR("Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>"); +MODULE_DESCRIPTION("Maxim MAX3355 extcon driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index 44c499e1beee..fdf8f5d4d4e9 100644 --- 
a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c @@ -1127,11 +1127,11 @@ static int max77693_muic_probe(struct platform_device *pdev) /* Support irq domain for MAX77693 MUIC device */ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) { struct max77693_muic_irq *muic_irq = &muic_irqs[i]; - unsigned int virq = 0; + int virq; virq = regmap_irq_get_virq(max77693->irq_data_muic, muic_irq->irq); - if (!virq) + if (virq <= 0) return -EINVAL; muic_irq->virq = virq; diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c index 9f9ea334399c..74dfb7f4f277 100644 --- a/drivers/extcon/extcon-max77843.c +++ b/drivers/extcon/extcon-max77843.c @@ -811,7 +811,7 @@ static int max77843_muic_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) { struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i]; - unsigned int virq = 0; + int virq = 0; virq = regmap_irq_get_virq(max77843->irq_data_muic, muic_irq->irq); diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c index 36bf1d63791c..e1bb82809bef 100644 --- a/drivers/extcon/extcon-rt8973a.c +++ b/drivers/extcon/extcon-rt8973a.c @@ -603,7 +603,7 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c, ret = devm_request_threaded_irq(info->dev, virq, NULL, rt8973a_muic_irq_handler, - IRQF_NO_SUSPEND, + IRQF_NO_SUSPEND | IRQF_ONESHOT, muic_irq->name, info); if (ret) { dev_err(info->dev, diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index c4dcab048cb8..1161d68a1863 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/hyperv.h> #include <linux/uio.h> +#include <linux/interrupt.h> #include "hyperv_vmbus.h" @@ -496,8 +497,33 @@ static void reset_channel_cb(void *arg) static int vmbus_close_internal(struct vmbus_channel *channel) { struct vmbus_channel_close_channel *msg; + struct tasklet_struct *tasklet; int ret; + /* + * process_chn_event(), running in the tasklet, can race + * with vmbus_close_internal() in the case of SMP guest, e.g., when + * the former is accessing channel->inbound.ring_buffer, the latter + * could be freeing the ring_buffer pages. + * + * To resolve the race, we can serialize them by disabling the + * tasklet when the latter is running here. + */ + tasklet = hv_context.event_dpc[channel->target_cpu]; + tasklet_disable(tasklet); + + /* + * In case a device driver's probe() fails (e.g., + * util_probe() -> vmbus_open() returns -ENOMEM) and the device is + * rescinded later (e.g., we dynamically disble an Integrated Service + * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): + * here we should skip most of the below cleanup work. + */ + if (channel->state != CHANNEL_OPENED_STATE) { + ret = -EINVAL; + goto out; + } + channel->state = CHANNEL_OPEN_STATE; channel->sc_creation_callback = NULL; /* Stop callback and cancel the timer asap */ @@ -525,7 +551,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel) * If we failed to post the close msg, * it is perhaps better to leak memory. */ - return ret; + goto out; } /* Tear down the gpadl for the channel's ring buffer */ @@ -538,7 +564,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel) * If we failed to teardown gpadl, * it is perhaps better to leak memory. 
*/ - return ret; + goto out; } } @@ -549,12 +575,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel) free_pages((unsigned long)channel->ringbuffer_pages, get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); - /* - * If the channel has been rescinded; process device removal. - */ - if (channel->rescind) - hv_process_channel_removal(channel, - channel->offermsg.child_relid); +out: + tasklet_enable(tasklet); + return ret; } @@ -630,10 +653,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, * on the ring. We will not signal if more data is * to be placed. * + * Based on the channel signal state, we will decide + * which signaling policy will be applied. + * * If we cannot write to the ring-buffer; signal the host * even if we may not have written anything. This is a rare * enough condition that it should not matter. */ + + if (channel->signal_policy) + signal = true; + else + kick_q = true; + if (((ret == 0) && kick_q && signal) || (ret)) vmbus_setevent(channel); @@ -733,10 +765,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, * on the ring. We will not signal if more data is * to be placed. * + * Based on the channel signal state, we will decide + * which signaling policy will be applied. + * * If we cannot write to the ring-buffer; signal the host * even if we may not have written anything. This is a rare * enough condition that it should not matter. */ + + if (channel->signal_policy) + signal = true; + else + kick_q = true; + if (((ret == 0) && kick_q && signal) || (ret)) vmbus_setevent(channel); @@ -881,46 +922,29 @@ EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer); * * Mainly used by Hyper-V drivers. */ -int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, - u32 bufferlen, u32 *buffer_actual_len, u64 *requestid) +static inline int +__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, + u32 bufferlen, u32 *buffer_actual_len, u64 *requestid, + bool raw) { - struct vmpacket_descriptor desc; - u32 packetlen; - u32 userlen; int ret; bool signal = false; - *buffer_actual_len = 0; - *requestid = 0; - - - ret = hv_ringbuffer_peek(&channel->inbound, &desc, - sizeof(struct vmpacket_descriptor)); - if (ret != 0) - return 0; - - packetlen = desc.len8 << 3; - userlen = packetlen - (desc.offset8 << 3); - - *buffer_actual_len = userlen; - - if (userlen > bufferlen) { - - pr_err("Buffer too small - got %d needs %d\n", - bufferlen, userlen); - return -ETOOSMALL; - } - - *requestid = desc.trans_id; - - /* Copy over the packet to the user buffer */ - ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen, - (desc.offset8 << 3), &signal); + ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen, + buffer_actual_len, requestid, &signal, raw); if (signal) vmbus_setevent(channel); - return 0; + return ret; +} + +int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, + u32 bufferlen, u32 *buffer_actual_len, + u64 *requestid) +{ + return __vmbus_recvpacket(channel, buffer, bufferlen, + buffer_actual_len, requestid, false); } EXPORT_SYMBOL(vmbus_recvpacket); @@ -931,37 +955,7 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer, u32 bufferlen, u32 *buffer_actual_len, u64 *requestid) { - struct vmpacket_descriptor desc; - u32 packetlen; - int ret; - bool signal = false; - - *buffer_actual_len = 0; - *requestid = 0; - - - ret = hv_ringbuffer_peek(&channel->inbound, &desc, - sizeof(struct vmpacket_descriptor)); - if (ret != 0) - return 0; - - - packetlen = desc.len8 << 3; - - 
*buffer_actual_len = packetlen; - - if (packetlen > bufferlen) - return -ENOBUFS; - - *requestid = desc.trans_id; - - /* Copy over the entire packet to the user buffer */ - ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0, - &signal); - - if (signal) - vmbus_setevent(channel); - - return ret; + return __vmbus_recvpacket(channel, buffer, bufferlen, + buffer_actual_len, requestid, true); } EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 652afd11a9ef..1c1ad47042c5 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -177,19 +177,24 @@ static void percpu_channel_deq(void *arg) } -void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) +static void vmbus_release_relid(u32 relid) { struct vmbus_channel_relid_released msg; - unsigned long flags; - struct vmbus_channel *primary_channel; memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); msg.child_relid = relid; msg.header.msgtype = CHANNELMSG_RELID_RELEASED; vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); +} - if (channel == NULL) - return; +void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) +{ + unsigned long flags; + struct vmbus_channel *primary_channel; + + vmbus_release_relid(relid); + + BUG_ON(!channel->rescind); if (channel->target_cpu != get_cpu()) { put_cpu(); @@ -201,9 +206,9 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) } if (channel->primary_channel == NULL) { - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); + mutex_lock(&vmbus_connection.channel_mutex); list_del(&channel->listentry); - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); + mutex_unlock(&vmbus_connection.channel_mutex); primary_channel = channel; } else { @@ -230,9 +235,7 @@ void vmbus_free_channels(void) list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list, listentry) { - /* if we don't set rescind to true, vmbus_close_internal() - * won't invoke hv_process_channel_removal(). 
- */ + /* hv_process_channel_removal() needs this */ channel->rescind = true; vmbus_device_unregister(channel->device_obj); @@ -250,7 +253,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) unsigned long flags; /* Make sure this is a new offer */ - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); + mutex_lock(&vmbus_connection.channel_mutex); list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { if (!uuid_le_cmp(channel->offermsg.offer.if_type, @@ -266,7 +269,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) list_add_tail(&newchannel->listentry, &vmbus_connection.chn_list); - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); + mutex_unlock(&vmbus_connection.channel_mutex); if (!fnew) { /* @@ -336,9 +339,11 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) return; err_deq_chan: - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); + vmbus_release_relid(newchannel->offermsg.child_relid); + + mutex_lock(&vmbus_connection.channel_mutex); list_del(&newchannel->listentry); - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); + mutex_unlock(&vmbus_connection.channel_mutex); if (newchannel->target_cpu != get_cpu()) { put_cpu(); @@ -356,8 +361,10 @@ err_free_chan: enum { IDE = 0, SCSI, + FC, NIC, ND_NIC, + PCIE, MAX_PERF_CHN, }; @@ -371,10 +378,14 @@ static const struct hv_vmbus_device_id hp_devs[] = { { HV_IDE_GUID, }, /* Storage - SCSI */ { HV_SCSI_GUID, }, + /* Storage - FC */ + { HV_SYNTHFC_GUID, }, /* Network */ { HV_NIC_GUID, }, /* NetworkDirect Guest RDMA */ { HV_ND_GUID, }, + /* PCI Express Pass Through */ + { HV_PCIE_GUID, }, }; @@ -405,8 +416,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui struct cpumask *alloced_mask; for (i = IDE; i < MAX_PERF_CHN; i++) { - if (!memcmp(type_guid->b, hp_devs[i].guid, - sizeof(uuid_le))) { + if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) { perf_chn = true; break; } @@ -585,7 +595,11 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) channel = relid2channel(rescind->child_relid); if (channel == NULL) { - hv_process_channel_removal(NULL, rescind->child_relid); + /* + * This is very impossible, because in + * vmbus_process_offer(), we have already invoked + * vmbus_release_relid() on error. + */ return; } diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 4fc2e8836e60..3dc5a9c7fad6 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -83,10 +83,13 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); - if (version >= VERSION_WIN8_1) { - msg->target_vcpu = hv_context.vp_index[get_cpu()]; - put_cpu(); - } + /* + * We want all channel messages to be delivered on CPU 0. + * This has been the behavior pre-win8. This is not + * perf issue and having all channel messages delivered on CPU 0 + * would be ok. 
+ */ + msg->target_vcpu = 0; /* * Add to list before we send the request since we may @@ -146,7 +149,7 @@ int vmbus_connect(void) spin_lock_init(&vmbus_connection.channelmsg_lock); INIT_LIST_HEAD(&vmbus_connection.chn_list); - spin_lock_init(&vmbus_connection.channel_lock); + mutex_init(&vmbus_connection.channel_mutex); /* * Setup the vmbus event connection for channel interrupt @@ -282,11 +285,10 @@ struct vmbus_channel *relid2channel(u32 relid) { struct vmbus_channel *channel; struct vmbus_channel *found_channel = NULL; - unsigned long flags; struct list_head *cur, *tmp; struct vmbus_channel *cur_sc; - spin_lock_irqsave(&vmbus_connection.channel_lock, flags); + mutex_lock(&vmbus_connection.channel_mutex); list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { if (channel->offermsg.child_relid == relid) { found_channel = channel; @@ -305,7 +307,7 @@ struct vmbus_channel *relid2channel(u32 relid) } } } - spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); + mutex_unlock(&vmbus_connection.channel_mutex); return found_channel; } diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 6341be8739ae..11bca51ef5ff 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -89,9 +89,9 @@ static int query_hypervisor_info(void) } /* - * do_hypercall- Invoke the specified hypercall + * hv_do_hypercall- Invoke the specified hypercall */ -static u64 do_hypercall(u64 control, void *input, void *output) +u64 hv_do_hypercall(u64 control, void *input, void *output) { u64 input_address = (input) ? virt_to_phys(input) : 0; u64 output_address = (output) ? virt_to_phys(output) : 0; @@ -132,6 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output) return hv_status_lo | ((u64)hv_status_hi << 32); #endif /* !x86_64 */ } +EXPORT_SYMBOL_GPL(hv_do_hypercall); #ifdef CONFIG_X86_64 static cycle_t read_hv_clock_tsc(struct clocksource *arg) @@ -139,7 +140,7 @@ static cycle_t read_hv_clock_tsc(struct clocksource *arg) cycle_t current_tick; struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page; - if (tsc_pg->tsc_sequence != -1) { + if (tsc_pg->tsc_sequence != 0) { /* * Use the tsc page to compute the value. */ @@ -161,7 +162,7 @@ static cycle_t read_hv_clock_tsc(struct clocksource *arg) if (tsc_pg->tsc_sequence == sequence) return current_tick; - if (tsc_pg->tsc_sequence != -1) + if (tsc_pg->tsc_sequence != 0) continue; /* * Fallback using MSR method. 
@@ -192,9 +193,7 @@ int hv_init(void) { int max_leaf; union hv_x64_msr_hypercall_contents hypercall_msr; - union hv_x64_msr_hypercall_contents tsc_msr; void *virtaddr = NULL; - void *va_tsc = NULL; memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); memset(hv_context.synic_message_page, 0, @@ -240,6 +239,9 @@ int hv_init(void) #ifdef CONFIG_X86_64 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { + union hv_x64_msr_hypercall_contents tsc_msr; + void *va_tsc; + va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); if (!va_tsc) goto cleanup; @@ -315,7 +317,7 @@ int hv_post_message(union hv_connection_id connection_id, { struct hv_input_post_message *aligned_msg; - u16 status; + u64 status; if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) return -EMSGSIZE; @@ -329,11 +331,10 @@ int hv_post_message(union hv_connection_id connection_id, aligned_msg->payload_size = payload_size; memcpy((void *)aligned_msg->payload, payload, payload_size); - status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) - & 0xFFFF; + status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL); put_cpu(); - return status; + return status & 0xFFFF; } @@ -343,13 +344,13 @@ int hv_post_message(union hv_connection_id connection_id, * * This involves a hypercall. */ -u16 hv_signal_event(void *con_id) +int hv_signal_event(void *con_id) { - u16 status; + u64 status; - status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF); + status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL); - return status; + return status & 0xFFFF; } static int hv_ce_set_next_event(unsigned long delta, diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index db4b887b889d..c37a71e13de0 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -51,7 +51,6 @@ static struct { struct hv_fcopy_hdr *fcopy_msg; /* current message */ struct vmbus_channel *recv_channel; /* chn we got the request */ u64 recv_req_id; /* request ID. */ - void *fcopy_context; /* for the channel callback */ } fcopy_transaction; static void fcopy_respond_to_host(int error); @@ -67,6 +66,13 @@ static struct hvutil_transport *hvt; */ static int dm_reg_value; +static void fcopy_poll_wrapper(void *channel) +{ + /* Transaction is finished, reset the state here to avoid races. */ + fcopy_transaction.state = HVUTIL_READY; + hv_fcopy_onchannelcallback(channel); +} + static void fcopy_timeout_func(struct work_struct *dummy) { /* @@ -74,13 +80,7 @@ static void fcopy_timeout_func(struct work_struct *dummy) * process the pending transaction. */ fcopy_respond_to_host(HV_E_FAIL); - - /* Transaction is finished, reset the state. */ - if (fcopy_transaction.state > HVUTIL_READY) - fcopy_transaction.state = HVUTIL_READY; - - hv_poll_channel(fcopy_transaction.fcopy_context, - hv_fcopy_onchannelcallback); + hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper); } static int fcopy_handle_handshake(u32 version) @@ -108,9 +108,7 @@ static int fcopy_handle_handshake(u32 version) return -EINVAL; } pr_debug("FCP: userspace daemon ver. %d registered\n", version); - fcopy_transaction.state = HVUTIL_READY; - hv_poll_channel(fcopy_transaction.fcopy_context, - hv_fcopy_onchannelcallback); + hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper); return 0; } @@ -227,15 +225,8 @@ void hv_fcopy_onchannelcallback(void *context) int util_fw_version; int fcopy_srv_version; - if (fcopy_transaction.state > HVUTIL_READY) { - /* - * We will defer processing this callback once - * the current transaction is complete. 
- */ - fcopy_transaction.fcopy_context = context; + if (fcopy_transaction.state > HVUTIL_READY) return; - } - fcopy_transaction.fcopy_context = NULL; vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, &requestid); @@ -275,7 +266,8 @@ void hv_fcopy_onchannelcallback(void *context) * Send the information to the user-level daemon. */ schedule_work(&fcopy_send_work); - schedule_delayed_work(&fcopy_timeout_work, 5*HZ); + schedule_delayed_work(&fcopy_timeout_work, + HV_UTIL_TIMEOUT * HZ); return; } icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; @@ -304,9 +296,8 @@ static int fcopy_on_msg(void *msg, int len) if (cancel_delayed_work_sync(&fcopy_timeout_work)) { fcopy_transaction.state = HVUTIL_USERSPACE_RECV; fcopy_respond_to_host(*val); - fcopy_transaction.state = HVUTIL_READY; - hv_poll_channel(fcopy_transaction.fcopy_context, - hv_fcopy_onchannelcallback); + hv_poll_channel(fcopy_transaction.recv_channel, + fcopy_poll_wrapper); } return 0; diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index 74c38a9f34a6..d4ab81bcd515 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -66,7 +66,6 @@ static struct { struct hv_kvp_msg *kvp_msg; /* current message */ struct vmbus_channel *recv_channel; /* chn we got the request */ u64 recv_req_id; /* request ID. */ - void *kvp_context; /* for the channel callback */ } kvp_transaction; /* @@ -94,6 +93,13 @@ static struct hvutil_transport *hvt; */ #define HV_DRV_VERSION "3.1" +static void kvp_poll_wrapper(void *channel) +{ + /* Transaction is finished, reset the state here to avoid races. */ + kvp_transaction.state = HVUTIL_READY; + hv_kvp_onchannelcallback(channel); +} + static void kvp_register(int reg_value) { @@ -121,12 +127,7 @@ static void kvp_timeout_func(struct work_struct *dummy) */ kvp_respond_to_host(NULL, HV_E_FAIL); - /* Transaction is finished, reset the state. */ - if (kvp_transaction.state > HVUTIL_READY) - kvp_transaction.state = HVUTIL_READY; - - hv_poll_channel(kvp_transaction.kvp_context, - hv_kvp_onchannelcallback); + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper); } static int kvp_handle_handshake(struct hv_kvp_msg *msg) @@ -153,7 +154,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg) pr_debug("KVP: userspace daemon ver. %d registered\n", KVP_OP_REGISTER); kvp_register(dm_reg_value); - kvp_transaction.state = HVUTIL_READY; + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper); return 0; } @@ -218,9 +219,7 @@ static int kvp_on_msg(void *msg, int len) */ if (cancel_delayed_work_sync(&kvp_timeout_work)) { kvp_respond_to_host(message, error); - kvp_transaction.state = HVUTIL_READY; - hv_poll_channel(kvp_transaction.kvp_context, - hv_kvp_onchannelcallback); + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper); } return 0; @@ -596,15 +595,8 @@ void hv_kvp_onchannelcallback(void *context) int util_fw_version; int kvp_srv_version; - if (kvp_transaction.state > HVUTIL_READY) { - /* - * We will defer processing this callback once - * the current transaction is complete. - */ - kvp_transaction.kvp_context = context; + if (kvp_transaction.state > HVUTIL_READY) return; - } - kvp_transaction.kvp_context = NULL; vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen, &requestid); @@ -668,7 +660,8 @@ void hv_kvp_onchannelcallback(void *context) * user-mode not responding. 
*/ schedule_work(&kvp_sendkey_work); - schedule_delayed_work(&kvp_timeout_work, 5*HZ); + schedule_delayed_work(&kvp_timeout_work, + HV_UTIL_TIMEOUT * HZ); return; diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index 815405f2e777..67def4a831c8 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -53,7 +53,6 @@ static struct { struct vmbus_channel *recv_channel; /* chn we got the request */ u64 recv_req_id; /* request ID. */ struct hv_vss_msg *msg; /* current message */ - void *vss_context; /* for the channel callback */ } vss_transaction; @@ -74,6 +73,13 @@ static void vss_timeout_func(struct work_struct *dummy); static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func); static DECLARE_WORK(vss_send_op_work, vss_send_op); +static void vss_poll_wrapper(void *channel) +{ + /* Transaction is finished, reset the state here to avoid races. */ + vss_transaction.state = HVUTIL_READY; + hv_vss_onchannelcallback(channel); +} + /* * Callback when data is received from user mode. */ @@ -86,12 +92,7 @@ static void vss_timeout_func(struct work_struct *dummy) pr_warn("VSS: timeout waiting for daemon to reply\n"); vss_respond_to_host(HV_E_FAIL); - /* Transaction is finished, reset the state. */ - if (vss_transaction.state > HVUTIL_READY) - vss_transaction.state = HVUTIL_READY; - - hv_poll_channel(vss_transaction.vss_context, - hv_vss_onchannelcallback); + hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper); } static int vss_handle_handshake(struct hv_vss_msg *vss_msg) @@ -112,7 +113,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg) default: return -EINVAL; } - vss_transaction.state = HVUTIL_READY; + hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper); pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value); return 0; } @@ -138,9 +139,8 @@ static int vss_on_msg(void *msg, int len) if (cancel_delayed_work_sync(&vss_timeout_work)) { vss_respond_to_host(vss_msg->error); /* Transaction is finished, reset the state. */ - vss_transaction.state = HVUTIL_READY; - hv_poll_channel(vss_transaction.vss_context, - hv_vss_onchannelcallback); + hv_poll_channel(vss_transaction.recv_channel, + vss_poll_wrapper); } } else { /* This is a spurious call! */ @@ -238,15 +238,8 @@ void hv_vss_onchannelcallback(void *context) struct icmsg_hdr *icmsghdrp; struct icmsg_negotiate *negop = NULL; - if (vss_transaction.state > HVUTIL_READY) { - /* - * We will defer processing this callback once - * the current transaction is complete. 
- */ - vss_transaction.vss_context = context; + if (vss_transaction.state > HVUTIL_READY) return; - } - vss_transaction.vss_context = NULL; vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, &requestid); @@ -338,6 +331,11 @@ static void vss_on_reset(void) int hv_vss_init(struct hv_util_service *srv) { + if (vmbus_proto_version < VERSION_WIN8_1) { + pr_warn("Integration service 'Backup (volume snapshot)'" + " not supported on this host version.\n"); + return -ENOTSUPP; + } recv_buffer = srv->recv_buffer; /* diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index 6a9d80a5332d..4f42c0e20c20 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -27,11 +27,9 @@ static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list); static void hvt_reset(struct hvutil_transport *hvt) { - mutex_lock(&hvt->outmsg_lock); kfree(hvt->outmsg); hvt->outmsg = NULL; hvt->outmsg_len = 0; - mutex_unlock(&hvt->outmsg_lock); if (hvt->on_reset) hvt->on_reset(); } @@ -44,10 +42,17 @@ static ssize_t hvt_op_read(struct file *file, char __user *buf, hvt = container_of(file->f_op, struct hvutil_transport, fops); - if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0)) + if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 || + hvt->mode != HVUTIL_TRANSPORT_CHARDEV)) return -EINTR; - mutex_lock(&hvt->outmsg_lock); + mutex_lock(&hvt->lock); + + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) { + ret = -EBADF; + goto out_unlock; + } + if (!hvt->outmsg) { ret = -EAGAIN; goto out_unlock; @@ -68,7 +73,7 @@ static ssize_t hvt_op_read(struct file *file, char __user *buf, hvt->outmsg_len = 0; out_unlock: - mutex_unlock(&hvt->outmsg_lock); + mutex_unlock(&hvt->lock); return ret; } @@ -77,19 +82,22 @@ static ssize_t hvt_op_write(struct file *file, const char __user *buf, { struct hvutil_transport *hvt; u8 *inmsg; + int ret; hvt = container_of(file->f_op, struct hvutil_transport, fops); - inmsg = kzalloc(count, GFP_KERNEL); - if (copy_from_user(inmsg, buf, count)) { - kfree(inmsg); - return -EFAULT; - } - if (hvt->on_msg(inmsg, count)) - return -EFAULT; + inmsg = memdup_user(buf, count); + if (IS_ERR(inmsg)) + return PTR_ERR(inmsg); + + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) + ret = -EBADF; + else + ret = hvt->on_msg(inmsg, count); + kfree(inmsg); - return count; + return ret ? ret : count; } static unsigned int hvt_op_poll(struct file *file, poll_table *wait) @@ -99,6 +107,10 @@ static unsigned int hvt_op_poll(struct file *file, poll_table *wait) hvt = container_of(file->f_op, struct hvutil_transport, fops); poll_wait(file, &hvt->outmsg_q, wait); + + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) + return POLLERR | POLLHUP; + if (hvt->outmsg_len > 0) return POLLIN | POLLRDNORM; @@ -108,40 +120,68 @@ static unsigned int hvt_op_poll(struct file *file, poll_table *wait) static int hvt_op_open(struct inode *inode, struct file *file) { struct hvutil_transport *hvt; + int ret = 0; + bool issue_reset = false; hvt = container_of(file->f_op, struct hvutil_transport, fops); - /* - * Switching to CHARDEV mode. We switch bach to INIT when device - * gets released. - */ - if (hvt->mode == HVUTIL_TRANSPORT_INIT) + mutex_lock(&hvt->lock); + + if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) { + ret = -EBADF; + } else if (hvt->mode == HVUTIL_TRANSPORT_INIT) { + /* + * Switching to CHARDEV mode. We switch bach to INIT when + * device gets released. 
+ */ hvt->mode = HVUTIL_TRANSPORT_CHARDEV; + } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) { /* * We're switching from netlink communication to using char * device. Issue the reset first. */ - hvt_reset(hvt); + issue_reset = true; hvt->mode = HVUTIL_TRANSPORT_CHARDEV; - } else - return -EBUSY; + } else { + ret = -EBUSY; + } - return 0; + if (issue_reset) + hvt_reset(hvt); + + mutex_unlock(&hvt->lock); + + return ret; +} + +static void hvt_transport_free(struct hvutil_transport *hvt) +{ + misc_deregister(&hvt->mdev); + kfree(hvt->outmsg); + kfree(hvt); } static int hvt_op_release(struct inode *inode, struct file *file) { struct hvutil_transport *hvt; + int mode_old; hvt = container_of(file->f_op, struct hvutil_transport, fops); - hvt->mode = HVUTIL_TRANSPORT_INIT; + mutex_lock(&hvt->lock); + mode_old = hvt->mode; + if (hvt->mode != HVUTIL_TRANSPORT_DESTROY) + hvt->mode = HVUTIL_TRANSPORT_INIT; /* * Cleanup message buffers to avoid spurious messages when the daemon * connects back. */ hvt_reset(hvt); + mutex_unlock(&hvt->lock); + + if (mode_old == HVUTIL_TRANSPORT_DESTROY) + hvt_transport_free(hvt); return 0; } @@ -168,6 +208,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) * Switching to NETLINK mode. Switching to CHARDEV happens when someone * opens the device. */ + mutex_lock(&hvt->lock); if (hvt->mode == HVUTIL_TRANSPORT_INIT) hvt->mode = HVUTIL_TRANSPORT_NETLINK; @@ -175,6 +216,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) hvt_found->on_msg(msg->data, msg->len); else pr_warn("hvt_cn_callback: unexpected netlink message!\n"); + mutex_unlock(&hvt->lock); } int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len) @@ -182,7 +224,8 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len) struct cn_msg *cn_msg; int ret = 0; - if (hvt->mode == HVUTIL_TRANSPORT_INIT) { + if (hvt->mode == HVUTIL_TRANSPORT_INIT || + hvt->mode == HVUTIL_TRANSPORT_DESTROY) { return -EINVAL; } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) { cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC); @@ -197,18 +240,26 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len) return ret; } /* HVUTIL_TRANSPORT_CHARDEV */ - mutex_lock(&hvt->outmsg_lock); + mutex_lock(&hvt->lock); + if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) { + ret = -EINVAL; + goto out_unlock; + } + if (hvt->outmsg) { /* Previous message wasn't received */ ret = -EFAULT; goto out_unlock; } hvt->outmsg = kzalloc(len, GFP_KERNEL); - memcpy(hvt->outmsg, msg, len); - hvt->outmsg_len = len; - wake_up_interruptible(&hvt->outmsg_q); + if (hvt->outmsg) { + memcpy(hvt->outmsg, msg, len); + hvt->outmsg_len = len; + wake_up_interruptible(&hvt->outmsg_q); + } else + ret = -ENOMEM; out_unlock: - mutex_unlock(&hvt->outmsg_lock); + mutex_unlock(&hvt->lock); return ret; } @@ -239,7 +290,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name, hvt->mdev.fops = &hvt->fops; init_waitqueue_head(&hvt->outmsg_q); - mutex_init(&hvt->outmsg_lock); + mutex_init(&hvt->lock); spin_lock(&hvt_list_lock); list_add(&hvt->list, &hvt_list); @@ -265,12 +316,25 @@ err_free_hvt: void hvutil_transport_destroy(struct hvutil_transport *hvt) { + int mode_old; + + mutex_lock(&hvt->lock); + mode_old = hvt->mode; + hvt->mode = HVUTIL_TRANSPORT_DESTROY; + wake_up_interruptible(&hvt->outmsg_q); + mutex_unlock(&hvt->lock); + + /* + * In case we were in 'chardev' mode we still have an open fd so we + * have to defer freeing the device. 
Netlink interface can be freed + * now. + */ spin_lock(&hvt_list_lock); list_del(&hvt->list); spin_unlock(&hvt_list_lock); if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) cn_del_callback(&hvt->cn_id); - misc_deregister(&hvt->mdev); - kfree(hvt->outmsg); - kfree(hvt); + + if (mode_old != HVUTIL_TRANSPORT_CHARDEV) + hvt_transport_free(hvt); } diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h index 314c76ce1b07..06254a165a18 100644 --- a/drivers/hv/hv_utils_transport.h +++ b/drivers/hv/hv_utils_transport.h @@ -25,6 +25,7 @@ enum hvutil_transport_mode { HVUTIL_TRANSPORT_INIT = 0, HVUTIL_TRANSPORT_NETLINK, HVUTIL_TRANSPORT_CHARDEV, + HVUTIL_TRANSPORT_DESTROY, }; struct hvutil_transport { @@ -38,7 +39,7 @@ struct hvutil_transport { u8 *outmsg; /* message to the userspace */ int outmsg_len; /* its length */ wait_queue_head_t outmsg_q; /* poll/read wait queue */ - struct mutex outmsg_lock; /* protects outmsg */ + struct mutex lock; /* protects struct members */ }; struct hvutil_transport *hvutil_transport_init(const char *name, diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 678663e2085f..4ebc796b4f33 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -31,6 +31,11 @@ #include <linux/hyperv.h> /* + * Timeout for services such as KVP and fcopy. + */ +#define HV_UTIL_TIMEOUT 30 + +/* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent * is set by CPUID(HVCPUID_VERSION_FEATURES). */ @@ -496,7 +501,7 @@ extern int hv_post_message(union hv_connection_id connection_id, enum hv_message_type message_type, void *payload, size_t payload_size); -extern u16 hv_signal_event(void *con_id); +extern int hv_signal_event(void *con_id); extern int hv_synic_alloc(void); @@ -528,14 +533,9 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, struct kvec *kv_list, u32 kv_count, bool *signal); -int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer, - u32 buflen); - -int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info, - void *buffer, - u32 buflen, - u32 offset, bool *signal); - +int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, + void *buffer, u32 buflen, u32 *buffer_actual_len, + u64 *requestid, bool *signal, bool raw); void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, struct hv_ring_buffer_debug_info *debug_info); @@ -592,7 +592,7 @@ struct vmbus_connection { /* List of channels */ struct list_head chn_list; - spinlock_t channel_lock; + struct mutex channel_mutex; struct workqueue_struct *work_queue; }; @@ -673,11 +673,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel, if (!channel) return; - if (channel->target_cpu != smp_processor_id()) - smp_call_function_single(channel->target_cpu, - cb, channel, true); - else - cb(channel); + smp_call_function_single(channel->target_cpu, cb, channel, true); } enum hvutil_device_state { diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 70a1a9a22f87..b53702ce692f 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -112,9 +112,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz, u32 read_loc = rbi->ring_buffer->read_index; u32 pending_sz = rbi->ring_buffer->pending_send_sz; - /* - * If the other end is not blocked on write don't bother. - */ + /* If the other end is not blocked on write don't bother. 
*/ if (pending_sz == 0) return false; @@ -128,12 +126,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz, return false; } -/* - * hv_get_next_write_location() - * - * Get the next write location for the specified ring buffer - * - */ +/* Get the next write location for the specified ring buffer. */ static inline u32 hv_get_next_write_location(struct hv_ring_buffer_info *ring_info) { @@ -142,12 +135,7 @@ hv_get_next_write_location(struct hv_ring_buffer_info *ring_info) return next; } -/* - * hv_set_next_write_location() - * - * Set the next write location for the specified ring buffer - * - */ +/* Set the next write location for the specified ring buffer. */ static inline void hv_set_next_write_location(struct hv_ring_buffer_info *ring_info, u32 next_write_location) @@ -155,11 +143,7 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info, ring_info->ring_buffer->write_index = next_write_location; } -/* - * hv_get_next_read_location() - * - * Get the next read location for the specified ring buffer - */ +/* Get the next read location for the specified ring buffer. */ static inline u32 hv_get_next_read_location(struct hv_ring_buffer_info *ring_info) { @@ -169,10 +153,8 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info) } /* - * hv_get_next_readlocation_withoffset() - * * Get the next read location + offset for the specified ring buffer. - * This allows the caller to skip + * This allows the caller to skip. */ static inline u32 hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info, @@ -186,13 +168,7 @@ hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info, return next; } -/* - * - * hv_set_next_read_location() - * - * Set the next read location for the specified ring buffer - * - */ +/* Set the next read location for the specified ring buffer. */ static inline void hv_set_next_read_location(struct hv_ring_buffer_info *ring_info, u32 next_read_location) @@ -201,12 +177,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info, } -/* - * - * hv_get_ring_buffer() - * - * Get the start of the ring buffer - */ +/* Get the start of the ring buffer. */ static inline void * hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) { @@ -214,25 +185,14 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) } -/* - * - * hv_get_ring_buffersize() - * - * Get the size of the ring buffer - */ +/* Get the size of the ring buffer. */ static inline u32 hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info) { return ring_info->ring_datasize; } -/* - * - * hv_get_ring_bufferindices() - * - * Get the read and write indices as u64 of the specified ring buffer - * - */ +/* Get the read and write indices as u64 of the specified ring buffer. */ static inline u64 hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info) { @@ -240,12 +200,8 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info) } /* - * - * hv_copyfrom_ringbuffer() - * * Helper routine to copy to source from ring buffer. * Assume there is enough room. Handles wrap-around in src case only!! - * */ static u32 hv_copyfrom_ringbuffer( struct hv_ring_buffer_info *ring_info, @@ -277,12 +233,8 @@ static u32 hv_copyfrom_ringbuffer( /* - * - * hv_copyto_ringbuffer() - * * Helper routine to copy from source to ring buffer. * Assume there is enough room. Handles wrap-around in dest case only!! 
- * */ static u32 hv_copyto_ringbuffer( struct hv_ring_buffer_info *ring_info, @@ -308,13 +260,7 @@ static u32 hv_copyto_ringbuffer( return start_write_offset; } -/* - * - * hv_ringbuffer_get_debuginfo() - * - * Get various debug metrics for the specified ring buffer - * - */ +/* Get various debug metrics for the specified ring buffer. */ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, struct hv_ring_buffer_debug_info *debug_info) { @@ -337,13 +283,7 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, } } -/* - * - * hv_ringbuffer_init() - * - *Initialize the ring buffer - * - */ +/* Initialize the ring buffer. */ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer, u32 buflen) { @@ -356,9 +296,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, ring_info->ring_buffer->read_index = ring_info->ring_buffer->write_index = 0; - /* - * Set the feature bit for enabling flow control. - */ + /* Set the feature bit for enabling flow control. */ ring_info->ring_buffer->feature_bits.value = 1; ring_info->ring_size = buflen; @@ -369,24 +307,12 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, return 0; } -/* - * - * hv_ringbuffer_cleanup() - * - * Cleanup the ring buffer - * - */ +/* Cleanup the ring buffer. */ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) { } -/* - * - * hv_ringbuffer_write() - * - * Write to the ring buffer - * - */ +/* Write to the ring buffer. */ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, struct kvec *kv_list, u32 kv_count, bool *signal) { @@ -411,10 +337,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, &bytes_avail_toread, &bytes_avail_towrite); - - /* If there is only room for the packet, assume it is full. */ - /* Otherwise, the next time around, we think the ring buffer */ - /* is empty since the read index == write index */ + /* + * If there is only room for the packet, assume it is full. + * Otherwise, the next time around, we think the ring buffer + * is empty since the read index == write index. 
+ */ if (bytes_avail_towrite <= totalbytes_towrite) { spin_unlock_irqrestore(&outring_info->ring_lock, flags); return -EAGAIN; @@ -453,80 +380,59 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, return 0; } - -/* - * - * hv_ringbuffer_peek() - * - * Read without advancing the read index - * - */ -int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info, - void *Buffer, u32 buflen) -{ - u32 bytes_avail_towrite; - u32 bytes_avail_toread; - u32 next_read_location = 0; - unsigned long flags; - - spin_lock_irqsave(&Inring_info->ring_lock, flags); - - hv_get_ringbuffer_availbytes(Inring_info, - &bytes_avail_toread, - &bytes_avail_towrite); - - /* Make sure there is something to read */ - if (bytes_avail_toread < buflen) { - - spin_unlock_irqrestore(&Inring_info->ring_lock, flags); - - return -EAGAIN; - } - - /* Convert to byte offset */ - next_read_location = hv_get_next_read_location(Inring_info); - - next_read_location = hv_copyfrom_ringbuffer(Inring_info, - Buffer, - buflen, - next_read_location); - - spin_unlock_irqrestore(&Inring_info->ring_lock, flags); - - return 0; -} - - -/* - * - * hv_ringbuffer_read() - * - * Read and advance the read index - * - */ -int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, - u32 buflen, u32 offset, bool *signal) +int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, + void *buffer, u32 buflen, u32 *buffer_actual_len, + u64 *requestid, bool *signal, bool raw) { u32 bytes_avail_towrite; u32 bytes_avail_toread; u32 next_read_location = 0; u64 prev_indices = 0; unsigned long flags; + struct vmpacket_descriptor desc; + u32 offset; + u32 packetlen; + int ret = 0; if (buflen <= 0) return -EINVAL; spin_lock_irqsave(&inring_info->ring_lock, flags); + *buffer_actual_len = 0; + *requestid = 0; + hv_get_ringbuffer_availbytes(inring_info, &bytes_avail_toread, &bytes_avail_towrite); /* Make sure there is something to read */ - if (bytes_avail_toread < buflen) { - spin_unlock_irqrestore(&inring_info->ring_lock, flags); + if (bytes_avail_toread < sizeof(desc)) { + /* + * No error is set when there is even no header, drivers are + * supposed to analyze buffer_actual_len. + */ + goto out_unlock; + } - return -EAGAIN; + next_read_location = hv_get_next_read_location(inring_info); + next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, + sizeof(desc), + next_read_location); + + offset = raw ? 0 : (desc.offset8 << 3); + packetlen = (desc.len8 << 3) - offset; + *buffer_actual_len = packetlen; + *requestid = desc.trans_id; + + if (bytes_avail_toread < packetlen + offset) { + ret = -EAGAIN; + goto out_unlock; + } + + if (packetlen > buflen) { + ret = -ENOBUFS; + goto out_unlock; } next_read_location = @@ -534,7 +440,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, next_read_location = hv_copyfrom_ringbuffer(inring_info, buffer, - buflen, + packetlen, next_read_location); next_read_location = hv_copyfrom_ringbuffer(inring_info, @@ -542,17 +448,19 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, sizeof(u64), next_read_location); - /* Make sure all reads are done before we update the read index since */ - /* the writer may start writing to the read area once the read index */ - /*is updated */ + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. 
+ */ mb(); /* Update the read index */ hv_set_next_read_location(inring_info, next_read_location); - spin_unlock_irqrestore(&inring_info->ring_lock, flags); - *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); - return 0; +out_unlock: + spin_unlock_irqrestore(&inring_info->ring_lock, flags); + return ret; } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index f19b6f7a467a..328e4c3808e0 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -47,7 +47,6 @@ static struct acpi_device *hv_acpi_dev; static struct tasklet_struct msg_dpc; static struct completion probe_event; -static int irq; static void hyperv_report_panic(struct pt_regs *regs) @@ -531,9 +530,9 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) static const uuid_le null_guid; -static inline bool is_null_guid(const __u8 *guid) +static inline bool is_null_guid(const uuid_le *guid) { - if (memcmp(guid, &null_guid, sizeof(uuid_le))) + if (uuid_le_cmp(*guid, null_guid)) return false; return true; } @@ -544,10 +543,10 @@ static inline bool is_null_guid(const __u8 *guid) */ static const struct hv_vmbus_device_id *hv_vmbus_get_id( const struct hv_vmbus_device_id *id, - const __u8 *guid) + const uuid_le *guid) { - for (; !is_null_guid(id->guid); id++) - if (!memcmp(&id->guid, guid, sizeof(uuid_le))) + for (; !is_null_guid(&id->guid); id++) + if (!uuid_le_cmp(id->guid, *guid)) return id; return NULL; @@ -563,7 +562,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver) struct hv_driver *drv = drv_to_hv_drv(driver); struct hv_device *hv_dev = device_to_hv_device(device); - if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b)) + if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) return 1; return 0; @@ -580,7 +579,7 @@ static int vmbus_probe(struct device *child_device) struct hv_device *dev = device_to_hv_device(child_device); const struct hv_vmbus_device_id *dev_id; - dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b); + dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type); if (drv->probe) { ret = drv->probe(dev, dev_id); if (ret != 0) @@ -602,23 +601,11 @@ static int vmbus_remove(struct device *child_device) { struct hv_driver *drv; struct hv_device *dev = device_to_hv_device(child_device); - u32 relid = dev->channel->offermsg.child_relid; if (child_device->driver) { drv = drv_to_hv_drv(child_device->driver); if (drv->remove) drv->remove(dev); - else { - hv_process_channel_removal(dev->channel, relid); - pr_err("remove not set for driver %s\n", - dev_name(child_device)); - } - } else { - /* - * We don't have a driver for this device; deal with the - * rescind message by removing the channel. 
- */ - hv_process_channel_removal(dev->channel, relid); } return 0; @@ -653,7 +640,10 @@ static void vmbus_shutdown(struct device *child_device) static void vmbus_device_release(struct device *device) { struct hv_device *hv_dev = device_to_hv_device(device); + struct vmbus_channel *channel = hv_dev->channel; + hv_process_channel_removal(channel, + channel->offermsg.child_relid); kfree(hv_dev); } @@ -835,10 +825,9 @@ static void vmbus_isr(void) * Here, we * - initialize the vmbus driver context * - invoke the vmbus hv main init routine - * - get the irq resource * - retrieve the channel offers */ -static int vmbus_bus_init(int irq) +static int vmbus_bus_init(void) { int ret; @@ -867,7 +856,7 @@ static int vmbus_bus_init(int irq) on_each_cpu(hv_synic_init, NULL, 1); ret = vmbus_connect(); if (ret) - goto err_alloc; + goto err_connect; if (vmbus_proto_version > VERSION_WIN7) cpu_hotplug_disable(); @@ -885,6 +874,8 @@ static int vmbus_bus_init(int irq) return 0; +err_connect: + on_each_cpu(hv_synic_cleanup, NULL, 1); err_alloc: hv_synic_free(); hv_remove_vmbus_irq(); @@ -1031,9 +1022,6 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx) struct resource **prev_res = NULL; switch (res->type) { - case ACPI_RESOURCE_TYPE_IRQ: - irq = res->data.irq.interrupts[0]; - return AE_OK; /* * "Address" descriptors are for bus windows. Ignore @@ -1075,12 +1063,28 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx) new_res->start = start; new_res->end = end; + /* + * Stick ranges from higher in address space at the front of the list. + * If two ranges are adjacent, merge them. + */ do { if (!*old_res) { *old_res = new_res; break; } + if (((*old_res)->end + 1) == new_res->start) { + (*old_res)->end = new_res->end; + kfree(new_res); + break; + } + + if ((*old_res)->start == new_res->end + 1) { + (*old_res)->start = new_res->start; + kfree(new_res); + break; + } + if ((*old_res)->end < new_res->start) { new_res->sibling = *old_res; if (prev_res) @@ -1191,6 +1195,23 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, } EXPORT_SYMBOL_GPL(vmbus_allocate_mmio); +/** + * vmbus_cpu_number_to_vp_number() - Map CPU to VP. + * @cpu_number: CPU number in Linux terms + * + * This function returns the mapping between the Linux processor + * number and the hypervisor's virtual processor number, useful + * in making hypercalls and such that talk about specific + * processors. + * + * Return: Virtual processor number in Hyper-V terms + */ +int vmbus_cpu_number_to_vp_number(int cpu_number) +{ + return hv_context.vp_index[cpu_number]; +} +EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number); + static int vmbus_acpi_add(struct acpi_device *device) { acpi_status result; @@ -1275,7 +1296,7 @@ static int __init hv_acpi_init(void) init_completion(&probe_event); /* - * Get irq resources first. + * Get ACPI resources first. */ ret = acpi_bus_register_driver(&vmbus_acpi_driver); @@ -1288,12 +1309,7 @@ static int __init hv_acpi_init(void) goto cleanup; } - if (irq <= 0) { - ret = -ENODEV; - goto cleanup; - } - - ret = vmbus_bus_init(irq); + ret = vmbus_bus_init(); if (ret) goto cleanup; diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 6c8921140f02..c85935f3525a 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -8,7 +8,7 @@ menuconfig CORESIGHT This framework provides a kernel interface for the CoreSight debug and trace drivers to register themselves with. 
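A few hunks above, vmbus_walk_resources() starts keeping the MMIO bus windows in a sibling list with higher addresses first and coalescing a new window into an existing one when the two are exactly adjacent. The sketch below isolates that insert-or-merge step under userspace assumptions: demo_res and insert_range() are invented names, free() replaces kfree(), the caller allocates entries with malloc(), and the ranges are assumed not to overlap.

#include <stdlib.h>

struct demo_res {			/* stripped-down stand-in for struct resource */
	unsigned long start;
	unsigned long end;		/* inclusive, as in struct resource */
	struct demo_res *sibling;
};

/*
 * Insert @new_res into the list at @head, keeping higher ranges first.
 * When @new_res touches an existing range, extend that range and free
 * @new_res instead of linking it in.
 */
static void insert_range(struct demo_res **head, struct demo_res *new_res)
{
	struct demo_res **cur = head;

	while (*cur) {
		if ((*cur)->end + 1 == new_res->start) {	/* new range sits just above */
			(*cur)->end = new_res->end;
			free(new_res);
			return;
		}
		if ((*cur)->start == new_res->end + 1) {	/* new range sits just below */
			(*cur)->start = new_res->start;
			free(new_res);
			return;
		}
		if ((*cur)->end < new_res->start)		/* keep the list descending */
			break;
		cur = &(*cur)->sibling;
	}

	new_res->sibling = *cur;
	*cur = new_res;
}

As in the hunk above, a merge only coalesces with the first neighbour it touches; two entries that become adjacent as a consequence of a merge remain separate list members.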
It's intended to build a topological view of the CoreSight components based on a DT - specification and configure the right serie of components when a + specification and configure the right series of components when a trace source gets enabled. if CORESIGHT diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index e25492137d8b..93738dfbf631 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data) to_match = data; i_csdev = to_coresight_device(dev); - if (!strcmp(to_match, dev_name(&i_csdev->dev))) + if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev))) return 1; return 0; diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index e74e5d6e5f9f..c948866edf87 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -412,16 +412,6 @@ static int hv_kbd_remove(struct hv_device *hv_dev) return 0; } -/* - * Keyboard GUID - * {f912ad6d-2b17-48ea-bd65-f927a61c7684} - */ -#define HV_KBD_GUID \ - .guid = { \ - 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, \ - 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 \ - } - static const struct hv_vmbus_device_id id_table[] = { /* Keyboard guid */ { HV_KBD_GUID, }, diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index b2f2486b3d75..677d0362f334 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -657,7 +657,9 @@ out: * @file: pointer to file structure * @band: band bitmap * - * Return: poll mask + * Return: negative on error, + * 0 if it did no changes, + * and positive a process was added or deleted */ static int mei_fasync(int fd, struct file *file, int band) { @@ -665,7 +667,7 @@ static int mei_fasync(int fd, struct file *file, int band) struct mei_cl *cl = file->private_data; if (!mei_cl_is_connected(cl)) - return POLLERR; + return -ENODEV; return fasync_helper(fd, file, band, &cl->ev_async); } diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 5ce5ef211bdb..3308427ed9f7 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -1,6 +1,6 @@ /* * Parallel-port resource manager code. - * + * * Authors: David Campbell <campbell@tirian.che.curtin.edu.au> * Tim Waugh <tim@cyberelk.demon.co.uk> * Jose Renau <renau@acm.org> @@ -54,16 +54,16 @@ static LIST_HEAD(drivers); static DEFINE_MUTEX(registration_lock); /* What you can do to a port that's gone away.. 
*/ -static void dead_write_lines (struct parport *p, unsigned char b){} -static unsigned char dead_read_lines (struct parport *p) { return 0; } -static unsigned char dead_frob_lines (struct parport *p, unsigned char b, +static void dead_write_lines(struct parport *p, unsigned char b){} +static unsigned char dead_read_lines(struct parport *p) { return 0; } +static unsigned char dead_frob_lines(struct parport *p, unsigned char b, unsigned char c) { return 0; } -static void dead_onearg (struct parport *p){} -static void dead_initstate (struct pardevice *d, struct parport_state *s) { } -static void dead_state (struct parport *p, struct parport_state *s) { } -static size_t dead_write (struct parport *p, const void *b, size_t l, int f) +static void dead_onearg(struct parport *p){} +static void dead_initstate(struct pardevice *d, struct parport_state *s) { } +static void dead_state(struct parport *p, struct parport_state *s) { } +static size_t dead_write(struct parport *p, const void *b, size_t l, int f) { return 0; } -static size_t dead_read (struct parport *p, void *b, size_t l, int f) +static size_t dead_read(struct parport *p, void *b, size_t l, int f) { return 0; } static struct parport_operations dead_ops = { .write_data = dead_write_lines, /* data */ @@ -93,7 +93,7 @@ static struct parport_operations dead_ops = { .ecp_write_data = dead_write, /* ecp */ .ecp_read_data = dead_read, .ecp_write_addr = dead_write, - + .compat_write_data = dead_write, /* compat */ .nibble_read_data = dead_read, /* nibble */ .byte_read_data = dead_read, /* byte */ @@ -148,7 +148,7 @@ void parport_bus_exit(void) /* * iterates through all the drivers registered with the bus and sends the port * details to the match_port callback of the driver, so that the driver can - * know about the new port that just regsitered with the bus and decide if it + * know about the new port that just registered with the bus and decide if it * wants to use this new port. */ static int driver_check(struct device_driver *dev_drv, void *_port) @@ -194,7 +194,7 @@ static void detach_driver_chain(struct parport *port) struct parport_driver *drv; /* caller has exclusive registration_lock */ list_for_each_entry(drv, &drivers, list) - drv->detach (port); + drv->detach(port); /* * call the detach function of the drivers registered in @@ -205,11 +205,13 @@ static void detach_driver_chain(struct parport *port) } /* Ask kmod for some lowlevel drivers. */ -static void get_lowlevel_driver (void) +static void get_lowlevel_driver(void) { - /* There is no actual module called this: you should set - * up an alias for modutils. */ - request_module ("parport_lowlevel"); + /* + * There is no actual module called this: you should set + * up an alias for modutils. + */ + request_module("parport_lowlevel"); } /* @@ -265,7 +267,7 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner, const char *mod_name) { if (list_empty(&portlist)) - get_lowlevel_driver (); + get_lowlevel_driver(); if (drv->devmodel) { /* using device model */ @@ -328,7 +330,7 @@ static int port_detach(struct device *dev, void *_drv) * finished by the time this function returns. 
**/ -void parport_unregister_driver (struct parport_driver *drv) +void parport_unregister_driver(struct parport_driver *drv) { struct parport *port; @@ -343,6 +345,7 @@ void parport_unregister_driver (struct parport_driver *drv) } mutex_unlock(®istration_lock); } +EXPORT_SYMBOL(parport_unregister_driver); static void free_port(struct device *dev) { @@ -372,12 +375,13 @@ static void free_port(struct device *dev) * until the matching parport_put_port() call. **/ -struct parport *parport_get_port (struct parport *port) +struct parport *parport_get_port(struct parport *port) { struct device *dev = get_device(&port->bus_dev); return to_parport_dev(dev); } +EXPORT_SYMBOL(parport_get_port); void parport_del_port(struct parport *port) { @@ -394,10 +398,11 @@ EXPORT_SYMBOL(parport_del_port); * zero (port is no longer used), free_port is called. **/ -void parport_put_port (struct parport *port) +void parport_put_port(struct parport *port) { put_device(&port->bus_dev); } +EXPORT_SYMBOL(parport_put_port); /** * parport_register_port - register a parallel port @@ -439,10 +444,8 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, int ret; tmp = kzalloc(sizeof(struct parport), GFP_KERNEL); - if (!tmp) { - printk(KERN_WARNING "parport: memory squeeze\n"); + if (!tmp) return NULL; - } /* Init our structure */ tmp->base = base; @@ -450,12 +453,12 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, tmp->dma = dma; tmp->muxport = tmp->daisy = tmp->muxsel = -1; tmp->modes = 0; - INIT_LIST_HEAD(&tmp->list); + INIT_LIST_HEAD(&tmp->list); tmp->devices = tmp->cad = NULL; tmp->flags = 0; tmp->ops = ops; tmp->physport = tmp; - memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info)); + memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info)); rwlock_init(&tmp->cad_lock); spin_lock_init(&tmp->waitlist_lock); spin_lock_init(&tmp->pardevice_lock); @@ -463,12 +466,11 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; sema_init(&tmp->ieee1284.irq, 0); tmp->spintime = parport_default_spintime; - atomic_set (&tmp->ref_count, 1); + atomic_set(&tmp->ref_count, 1); INIT_LIST_HEAD(&tmp->full_list); name = kmalloc(15, GFP_KERNEL); if (!name) { - printk(KERN_ERR "parport: memory squeeze\n"); kfree(tmp); return NULL; } @@ -508,6 +510,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, return tmp; } +EXPORT_SYMBOL(parport_register_port); /** * parport_announce_port - tell device drivers about a parallel port @@ -521,7 +524,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, * functions will be called, with @port as the parameter. **/ -void parport_announce_port (struct parport *port) +void parport_announce_port(struct parport *port) { int i; @@ -531,9 +534,8 @@ void parport_announce_port (struct parport *port) #endif if (!port->dev) - printk(KERN_WARNING "%s: fix this legacy " - "no-device port driver!\n", - port->name); + printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n", + port->name); parport_proc_register(port); mutex_lock(®istration_lock); @@ -547,7 +549,7 @@ void parport_announce_port (struct parport *port) spin_unlock_irq(&parportlist_lock); /* Let drivers know that new port(s) has arrived. 
*/ - attach_driver_chain (port); + attach_driver_chain(port); for (i = 1; i < 3; i++) { struct parport *slave = port->slaves[i-1]; if (slave) @@ -555,6 +557,7 @@ void parport_announce_port (struct parport *port) } mutex_unlock(®istration_lock); } +EXPORT_SYMBOL(parport_announce_port); /** * parport_remove_port - deregister a parallel port @@ -582,7 +585,7 @@ void parport_remove_port(struct parport *port) mutex_lock(®istration_lock); /* Spread the word. */ - detach_driver_chain (port); + detach_driver_chain(port); #ifdef CONFIG_PARPORT_1284 /* Forget the IEEE1284.3 topology of the port. */ @@ -616,6 +619,7 @@ void parport_remove_port(struct parport *port) parport_put_port(slave); } } +EXPORT_SYMBOL(parport_remove_port); /** * parport_register_device - register a device on a parallel port @@ -689,14 +693,14 @@ void parport_remove_port(struct parport *port) struct pardevice * parport_register_device(struct parport *port, const char *name, int (*pf)(void *), void (*kf)(void *), - void (*irq_func)(void *), + void (*irq_func)(void *), int flags, void *handle) { struct pardevice *tmp; if (port->physport->flags & PARPORT_FLAG_EXCL) { /* An exclusive device is registered. */ - printk (KERN_DEBUG "%s: no more devices allowed\n", + printk(KERN_DEBUG "%s: no more devices allowed\n", port->name); return NULL; } @@ -722,28 +726,24 @@ parport_register_device(struct parport *port, const char *name, } } - /* We up our own module reference count, and that of the port - on which a device is to be registered, to ensure that - neither of us gets unloaded while we sleep in (e.g.) - kmalloc. - */ - if (!try_module_get(port->ops->owner)) { + /* + * We up our own module reference count, and that of the port + * on which a device is to be registered, to ensure that + * neither of us gets unloaded while we sleep in (e.g.) + * kmalloc. 
+ */ + if (!try_module_get(port->ops->owner)) return NULL; - } - - parport_get_port (port); + + parport_get_port(port); tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL); - if (tmp == NULL) { - printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); + if (!tmp) goto out; - } tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL); - if (tmp->state == NULL) { - printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name); + if (!tmp->state) goto out_free_pardevice; - } tmp->name = name; tmp->port = port; @@ -767,19 +767,21 @@ parport_register_device(struct parport *port, const char *name, if (flags & PARPORT_DEV_EXCL) { if (port->physport->devices) { - spin_unlock (&port->physport->pardevice_lock); - printk (KERN_DEBUG - "%s: cannot grant exclusive access for " - "device %s\n", port->name, name); + spin_unlock(&port->physport->pardevice_lock); + printk(KERN_DEBUG + "%s: cannot grant exclusive access for device %s\n", + port->name, name); goto out_free_all; } port->flags |= PARPORT_FLAG_EXCL; } tmp->next = port->physport->devices; - wmb(); /* Make sure that tmp->next is written before it's - added to the list; see comments marked 'no locking - required' */ + wmb(); /* + * Make sure that tmp->next is written before it's + * added to the list; see comments marked 'no locking + * required' + */ if (port->physport->devices) port->physport->devices->prev = tmp; port->physport->devices = tmp; @@ -805,11 +807,12 @@ parport_register_device(struct parport *port, const char *name, out_free_pardevice: kfree(tmp); out: - parport_put_port (port); + parport_put_port(port); module_put(port->ops->owner); return NULL; } +EXPORT_SYMBOL(parport_register_device); static void free_pardevice(struct device *dev) { @@ -968,7 +971,7 @@ void parport_unregister_device(struct pardevice *dev) struct parport *port; #ifdef PARPORT_PARANOID - if (dev == NULL) { + if (!dev) { printk(KERN_ERR "parport_unregister_device: passed NULL\n"); return; } @@ -985,7 +988,7 @@ void parport_unregister_device(struct pardevice *dev) if (port->cad == dev) { printk(KERN_DEBUG "%s: %s forgot to release port\n", port->name, dev->name); - parport_release (dev); + parport_release(dev); } spin_lock(&port->pardevice_lock); @@ -1001,8 +1004,10 @@ void parport_unregister_device(struct pardevice *dev) spin_unlock(&port->pardevice_lock); - /* Make sure we haven't left any pointers around in the wait - * list. */ + /* + * Make sure we haven't left any pointers around in the wait + * list. + */ spin_lock_irq(&port->waitlist_lock); if (dev->waitprev || dev->waitnext || port->waithead == dev) { if (dev->waitprev) @@ -1023,8 +1028,9 @@ void parport_unregister_device(struct pardevice *dev) kfree(dev); module_put(port->ops->owner); - parport_put_port (port); + parport_put_port(port); } +EXPORT_SYMBOL(parport_unregister_device); /** * parport_find_number - find a parallel port by number @@ -1038,23 +1044,24 @@ void parport_unregister_device(struct pardevice *dev) * gives you, use parport_put_port(). 
*/ -struct parport *parport_find_number (int number) +struct parport *parport_find_number(int number) { struct parport *port, *result = NULL; if (list_empty(&portlist)) - get_lowlevel_driver (); + get_lowlevel_driver(); - spin_lock (&parportlist_lock); + spin_lock(&parportlist_lock); list_for_each_entry(port, &portlist, list) { if (port->number == number) { - result = parport_get_port (port); + result = parport_get_port(port); break; } } - spin_unlock (&parportlist_lock); + spin_unlock(&parportlist_lock); return result; } +EXPORT_SYMBOL(parport_find_number); /** * parport_find_base - find a parallel port by base address @@ -1068,23 +1075,24 @@ struct parport *parport_find_number (int number) * gives you, use parport_put_port(). */ -struct parport *parport_find_base (unsigned long base) +struct parport *parport_find_base(unsigned long base) { struct parport *port, *result = NULL; if (list_empty(&portlist)) - get_lowlevel_driver (); + get_lowlevel_driver(); - spin_lock (&parportlist_lock); + spin_lock(&parportlist_lock); list_for_each_entry(port, &portlist, list) { if (port->base == base) { - result = parport_get_port (port); + result = parport_get_port(port); break; } } - spin_unlock (&parportlist_lock); + spin_unlock(&parportlist_lock); return result; } +EXPORT_SYMBOL(parport_find_base); /** * parport_claim - claim access to a parallel port device @@ -1111,8 +1119,9 @@ int parport_claim(struct pardevice *dev) } /* Preempt any current device */ - write_lock_irqsave (&port->cad_lock, flags); - if ((oldcad = port->cad) != NULL) { + write_lock_irqsave(&port->cad_lock, flags); + oldcad = port->cad; + if (oldcad) { if (oldcad->preempt) { if (oldcad->preempt(oldcad->private)) goto blocked; @@ -1121,8 +1130,10 @@ int parport_claim(struct pardevice *dev) goto blocked; if (port->cad != oldcad) { - /* I think we'll actually deadlock rather than - get here, but just in case.. */ + /* + * I think we'll actually deadlock rather than + * get here, but just in case.. + */ printk(KERN_WARNING "%s: %s released port when preempted!\n", port->name, oldcad->name); @@ -1136,7 +1147,7 @@ int parport_claim(struct pardevice *dev) dev->waiting = 0; /* Take ourselves out of the wait list again. */ - spin_lock_irq (&port->waitlist_lock); + spin_lock_irq(&port->waitlist_lock); if (dev->waitprev) dev->waitprev->waitnext = dev->waitnext; else @@ -1145,7 +1156,7 @@ int parport_claim(struct pardevice *dev) dev->waitnext->waitprev = dev->waitprev; else port->waittail = dev->waitprev; - spin_unlock_irq (&port->waitlist_lock); + spin_unlock_irq(&port->waitlist_lock); dev->waitprev = dev->waitnext = NULL; } @@ -1162,7 +1173,7 @@ int parport_claim(struct pardevice *dev) /* If it's a daisy chain device, select it. */ if (dev->daisy >= 0) { /* This could be lazier. */ - if (!parport_daisy_select (port, dev->daisy, + if (!parport_daisy_select(port, dev->daisy, IEEE1284_MODE_COMPAT)) port->daisy = dev->daisy; } @@ -1175,13 +1186,15 @@ int parport_claim(struct pardevice *dev) return 0; blocked: - /* If this is the first time we tried to claim the port, register an - interest. This is only allowed for devices sleeping in - parport_claim_or_block(), or those with a wakeup function. */ + /* + * If this is the first time we tried to claim the port, register an + * interest. This is only allowed for devices sleeping in + * parport_claim_or_block(), or those with a wakeup function. 
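With the exports in share.c now sitting next to their definitions, the client-side sequence they support is easiest to read end to end: take a reference on a port, register a pardevice on it, claim the port around any I/O, and undo everything in reverse order. The module below is a compile-oriented sketch of that sequence, not code from the tree: the name "demo", the choice of port 0 and the NULL preempt/wakeup/irq callbacks are arbitrary, and a real client would normally register a struct parport_driver and do this work from its callbacks rather than calling parport_find_number() directly.

#include <linux/module.h>
#include <linux/parport.h>

static struct parport *port;
static struct pardevice *dev;

static int __init demo_init(void)
{
	int ret;

	port = parport_find_number(0);		/* takes a reference on port 0 */
	if (!port)
		return -ENODEV;

	dev = parport_register_device(port, "demo", NULL, NULL, NULL, 0, NULL);
	if (!dev) {
		parport_put_port(port);
		return -ENODEV;
	}

	ret = parport_claim_or_block(dev);	/* may sleep until the port is free */
	if (ret < 0) {
		parport_unregister_device(dev);
		parport_put_port(port);
		return ret;
	}

	parport_write_data(port, 0x55);		/* some I/O while we own the port */
	parport_release(dev);
	return 0;
}

static void __exit demo_exit(void)
{
	parport_unregister_device(dev);
	parport_put_port(port);			/* balances parport_find_number() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");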
+ */ /* The cad_lock is still held for writing here */ if (dev->waiting & 2 || dev->wakeup) { - spin_lock (&port->waitlist_lock); + spin_lock(&port->waitlist_lock); if (test_and_set_bit(0, &dev->waiting) == 0) { /* First add ourselves to the end of the wait list. */ dev->waitnext = NULL; @@ -1192,11 +1205,12 @@ blocked: } else port->waithead = port->waittail = dev; } - spin_unlock (&port->waitlist_lock); + spin_unlock(&port->waitlist_lock); } - write_unlock_irqrestore (&port->cad_lock, flags); + write_unlock_irqrestore(&port->cad_lock, flags); return -EAGAIN; } +EXPORT_SYMBOL(parport_claim); /** * parport_claim_or_block - claim access to a parallel port device @@ -1212,8 +1226,10 @@ int parport_claim_or_block(struct pardevice *dev) { int r; - /* Signal to parport_claim() that we can wait even without a - wakeup function. */ + /* + * Signal to parport_claim() that we can wait even without a + * wakeup function. + */ dev->waiting = 2; /* Try to claim the port. If this fails, we need to sleep. */ @@ -1231,14 +1247,15 @@ int parport_claim_or_block(struct pardevice *dev) * See also parport_release() */ - /* If dev->waiting is clear now, an interrupt - gave us the port and we would deadlock if we slept. */ + /* + * If dev->waiting is clear now, an interrupt + * gave us the port and we would deadlock if we slept. + */ if (dev->waiting) { wait_event_interruptible(dev->wait_q, !dev->waiting); - if (signal_pending (current)) { + if (signal_pending(current)) return -EINTR; - } r = 1; } else { r = 0; @@ -1250,15 +1267,15 @@ int parport_claim_or_block(struct pardevice *dev) #ifdef PARPORT_DEBUG_SHARING if (dev->port->physport->cad != dev) - printk(KERN_DEBUG "%s: exiting parport_claim_or_block " - "but %s owns port!\n", dev->name, - dev->port->physport->cad ? + printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n", + dev->name, dev->port->physport->cad ? dev->port->physport->cad->name:"nobody"); #endif } dev->waiting = 0; return r; } +EXPORT_SYMBOL(parport_claim_or_block); /** * parport_release - give up access to a parallel port device @@ -1278,9 +1295,9 @@ void parport_release(struct pardevice *dev) /* Make sure that dev is the current device */ write_lock_irqsave(&port->cad_lock, flags); if (port->cad != dev) { - write_unlock_irqrestore (&port->cad_lock, flags); - printk(KERN_WARNING "%s: %s tried to release parport " - "when not owner\n", port->name, dev->name); + write_unlock_irqrestore(&port->cad_lock, flags); + printk(KERN_WARNING "%s: %s tried to release parport when not owner\n", + port->name, dev->name); return; } @@ -1293,7 +1310,7 @@ void parport_release(struct pardevice *dev) /* If this is a daisy device, deselect it. */ if (dev->daisy >= 0) { - parport_daisy_deselect_all (port); + parport_daisy_deselect_all(port); port->daisy = -1; } #endif @@ -1304,8 +1321,10 @@ void parport_release(struct pardevice *dev) /* Save control registers */ port->ops->save_state(port, dev->state); - /* If anybody is waiting, find out who's been there longest and - then wake them up. (Note: no locking required) */ + /* + * If anybody is waiting, find out who's been there longest and + * then wake them up. (Note: no locking required) + */ /* !!! LOCKING IS NEEDED HERE */ for (pd = port->waithead; pd; pd = pd->waitnext) { if (pd->waiting & 2) { /* sleeping in claim_or_block */ @@ -1322,14 +1341,17 @@ void parport_release(struct pardevice *dev) } } - /* Nobody was waiting, so walk the list to see if anyone is - interested in being woken up. 
(Note: no locking required) */ + /* + * Nobody was waiting, so walk the list to see if anyone is + * interested in being woken up. (Note: no locking required) + */ /* !!! LOCKING IS NEEDED HERE */ - for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) { + for (pd = port->devices; !port->cad && pd; pd = pd->next) { if (pd->wakeup && pd != dev) pd->wakeup(pd->private); } } +EXPORT_SYMBOL(parport_release); irqreturn_t parport_irq_handler(int irq, void *dev_id) { @@ -1339,22 +1361,6 @@ irqreturn_t parport_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } - -/* Exported symbols for modules. */ - -EXPORT_SYMBOL(parport_claim); -EXPORT_SYMBOL(parport_claim_or_block); -EXPORT_SYMBOL(parport_release); -EXPORT_SYMBOL(parport_register_port); -EXPORT_SYMBOL(parport_announce_port); -EXPORT_SYMBOL(parport_remove_port); -EXPORT_SYMBOL(parport_unregister_driver); -EXPORT_SYMBOL(parport_register_device); -EXPORT_SYMBOL(parport_unregister_device); -EXPORT_SYMBOL(parport_get_port); -EXPORT_SYMBOL(parport_put_port); -EXPORT_SYMBOL(parport_find_number); -EXPORT_SYMBOL(parport_find_base); EXPORT_SYMBOL(parport_irq_handler); MODULE_LICENSE("GPL"); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 8fdc17b84739..753dbad0bf94 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -141,8 +141,6 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, { u32 read_loc, write_loc, dsize; - smp_read_barrier_depends(); - /* Capture the read/write indices before they changed */ read_loc = rbi->ring_buffer->read_index; write_loc = rbi->ring_buffer->write_index; @@ -630,6 +628,11 @@ struct hv_input_signal_event_buffer { struct hv_input_signal_event event; }; +enum hv_signal_policy { + HV_SIGNAL_POLICY_DEFAULT = 0, + HV_SIGNAL_POLICY_EXPLICIT, +}; + struct vmbus_channel { /* Unique channel id */ int id; @@ -757,8 +760,21 @@ struct vmbus_channel { * link up channels based on their CPU affinity. */ struct list_head percpu_list; + /* + * Host signaling policy: The default policy will be + * based on the ring buffer state. We will also support + * a policy where the client driver can have explicit + * signaling control. + */ + enum hv_signal_policy signal_policy; }; +static inline void set_channel_signal_state(struct vmbus_channel *c, + enum hv_signal_policy policy) +{ + c->signal_policy = policy; +} + static inline void set_channel_read_state(struct vmbus_channel *c, bool state) { c->batched_reading = state; @@ -983,16 +999,8 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t size, resource_size_t align, bool fb_overlap_ok); -/** - * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device - * - * This macro is used to create a struct hv_vmbus_device_id that matches a - * specific device. - */ -#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \ - g8, g9, ga, gb, gc, gd, ge, gf) \ - .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \ - g8, g9, ga, gb, gc, gd, ge, gf }, +int vmbus_cpu_number_to_vp_number(int cpu_number); +u64 hv_do_hypercall(u64 control, void *input, void *output); /* * GUID definitions of various offer types - services offered to the guest. 
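The GUID macros in the hunks that follow are rewritten from hand-maintained byte arrays to UUID_LE(), so the layout is worth spelling out: the first three fields of the textual GUID are stored least-significant byte first and the remaining eight bytes verbatim. The standalone check below demonstrates that for the NIC GUID {f8615163-df3e-46c5-913f-f2d2f965ed0e}; pack_uuid_le() is an invented helper that mimics what UUID_LE() produces, and the reference byte array is copied from the old HV_NIC_GUID definition being removed.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack a GUID the way UUID_LE() does: a, b, c little-endian, d0..d7 verbatim. */
static void pack_uuid_le(uint8_t out[16], uint32_t a, uint16_t b, uint16_t c,
			 const uint8_t d[8])
{
	out[0] = a & 0xff;
	out[1] = (a >> 8) & 0xff;
	out[2] = (a >> 16) & 0xff;
	out[3] = (a >> 24) & 0xff;
	out[4] = b & 0xff;
	out[5] = (b >> 8) & 0xff;
	out[6] = c & 0xff;
	out[7] = (c >> 8) & 0xff;
	memcpy(&out[8], d, 8);
}

int main(void)
{
	/* Old open-coded form of HV_NIC_GUID, copied from the array it replaces. */
	static const uint8_t legacy[16] = {
		0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
		0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e
	};
	static const uint8_t tail[8] = {
		0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e
	};
	uint8_t packed[16];

	pack_uuid_le(packed, 0xf8615163, 0xdf3e, 0x46c5, tail);
	assert(memcmp(packed, legacy, sizeof(legacy)) == 0);
	return 0;
}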
@@ -1003,118 +1011,102 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, * {f8615163-df3e-46c5-913f-f2d2f965ed0e} */ #define HV_NIC_GUID \ - .guid = { \ - 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \ - 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \ - } + .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) /* * IDE GUID * {32412632-86cb-44a2-9b5c-50d1417354f5} */ #define HV_IDE_GUID \ - .guid = { \ - 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ - 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \ - } + .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) /* * SCSI GUID * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ #define HV_SCSI_GUID \ - .guid = { \ - 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ - 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \ - } + .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) /* * Shutdown GUID * {0e0b6031-5213-4934-818b-38d90ced39db} */ #define HV_SHUTDOWN_GUID \ - .guid = { \ - 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \ - 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \ - } + .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) /* * Time Synch GUID * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ #define HV_TS_GUID \ - .guid = { \ - 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \ - 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \ - } + .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) /* * Heartbeat GUID * {57164f39-9115-4e78-ab55-382f3bd5422d} */ #define HV_HEART_BEAT_GUID \ - .guid = { \ - 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \ - 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \ - } + .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) /* * KVP GUID * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} */ #define HV_KVP_GUID \ - .guid = { \ - 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \ - 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \ - } + .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) /* * Dynamic memory GUID * {525074dc-8985-46e2-8057-a307dc18a502} */ #define HV_DM_GUID \ - .guid = { \ - 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \ - 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \ - } + .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) /* * Mouse GUID * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} */ #define HV_MOUSE_GUID \ - .guid = { \ - 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \ - 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \ - } + .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) + +/* + * Keyboard GUID + * {f912ad6d-2b17-48ea-bd65-f927a61c7684} + */ +#define HV_KBD_GUID \ + .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) /* * VSS (Backup/Restore) GUID */ #define HV_VSS_GUID \ - .guid = { \ - 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \ - 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \ - } + .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) /* * Synthetic Video GUID * {DA0A7802-E377-4aac-8E77-0558EB1073F8} */ #define HV_SYNTHVID_GUID \ - .guid = { \ - 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \ - 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \ - } + .guid = UUID_LE(0xda0a7802, 0xe377, 
0x4aac, 0x8e, 0x77, \ + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) /* * Synthetic FC GUID * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} */ #define HV_SYNTHFC_GUID \ - .guid = { \ - 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \ - 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \ - } + .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) /* * Guest File Copy Service @@ -1122,20 +1114,25 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, */ #define HV_FCOPY_GUID \ - .guid = { \ - 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \ - 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \ - } + .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) /* * NetworkDirect. This is the guest RDMA service. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} */ #define HV_ND_GUID \ - .guid = { \ - 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \ - 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \ - } + .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) + +/* + * PCI Express Pass Through + * {44C4F61D-4444-4400-9D52-802E27EDE19F} + */ + +#define HV_PCIE_GUID \ + .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) /* * Common header for Hyper-V ICs diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 64f36e09a790..6e4c645e1c0d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -404,7 +404,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. */ struct hv_vmbus_device_id { - __u8 guid[16]; + uuid_le guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index e4c0a35d6417..e347b24ef9fb 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -313,6 +313,7 @@ enum hv_kvp_exchg_pool { #define HV_INVALIDARG 0x80070057 #define HV_GUID_NOTFOUND 0x80041002 #define HV_ERROR_ALREADY_EXISTS 0x80070050 +#define HV_ERROR_DISK_FULL 0x80070070 #define ADDR_FAMILY_NONE 0x00 #define ADDR_FAMILY_IPV4 0x01 diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index 2f4b7ffd5570..d8f6c094cce5 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -8,11 +8,14 @@ # Licensed under the terms of the GNU GPL License version 2 +import difflib import os import re +import signal import sys -from subprocess import Popen, PIPE, STDOUT +from multiprocessing import Pool, cpu_count from optparse import OptionParser +from subprocess import Popen, PIPE, STDOUT # regex expressions @@ -26,7 +29,7 @@ SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")" # regex objects REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") -REGEX_FEATURE = re.compile(r'(?!\B"[^"]*)' + FEATURE + r'(?![^"]*"\B)') +REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)') REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE) REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) @@ -34,6 +37,7 @@ REGEX_KCONFIG_STMT = re.compile(STMT) REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$") REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$") REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") +REGEX_QUOTES = re.compile("(\"(.*?)\")") def parse_options(): @@ -71,6 +75,9 @@ def parse_options(): "the pattern needs to be a Python regex. 
To " "ignore defconfigs, specify -i '.*defconfig'.") + parser.add_option('-s', '--sim', dest='sim', action='store', default="", + help="Print a list of maximum 10 string-similar symbols.") + parser.add_option('', '--force', dest='force', action='store_true', default=False, help="Reset current Git tree even when it's dirty.") @@ -109,6 +116,18 @@ def main(): """Main function of this module.""" opts = parse_options() + if opts.sim and not opts.commit and not opts.diff: + sims = find_sims(opts.sim, opts.ignore) + if sims: + print "%s: %s" % (yel("Similar symbols"), ', '.join(sims)) + else: + print "%s: no similar symbols found" % yel("Similar symbols") + sys.exit(0) + + # dictionary of (un)defined symbols + defined = {} + undefined = {} + if opts.commit or opts.diff: head = get_head() @@ -127,40 +146,56 @@ def main(): # get undefined items before the commit execute("git reset --hard %s" % commit_a) - undefined_a = check_symbols(opts.ignore) + undefined_a, _ = check_symbols(opts.ignore) # get undefined items for the commit execute("git reset --hard %s" % commit_b) - undefined_b = check_symbols(opts.ignore) + undefined_b, defined = check_symbols(opts.ignore) # report cases that are present for the commit but not before for feature in sorted(undefined_b): # feature has not been undefined before if not feature in undefined_a: files = sorted(undefined_b.get(feature)) - print "%s\t%s" % (yel(feature), ", ".join(files)) - if opts.find: - commits = find_commits(feature, opts.diff) - print red(commits) + undefined[feature] = files # check if there are new files that reference the undefined feature else: files = sorted(undefined_b.get(feature) - undefined_a.get(feature)) if files: - print "%s\t%s" % (yel(feature), ", ".join(files)) - if opts.find: - commits = find_commits(feature, opts.diff) - print red(commits) + undefined[feature] = files # reset to head execute("git reset --hard %s" % head) # default to check the entire tree else: - undefined = check_symbols(opts.ignore) - for feature in sorted(undefined): - files = sorted(undefined.get(feature)) - print "%s\t%s" % (yel(feature), ", ".join(files)) + undefined, defined = check_symbols(opts.ignore) + + # now print the output + for feature in sorted(undefined): + print red(feature) + + files = sorted(undefined.get(feature)) + print "%s: %s" % (yel("Referencing files"), ", ".join(files)) + + sims = find_sims(feature, opts.ignore, defined) + sims_out = yel("Similar symbols") + if sims: + print "%s: %s" % (sims_out, ', '.join(sims)) + else: + print "%s: %s" % (sims_out, "no similar symbols found") + + if opts.find: + print "%s:" % yel("Commits changing symbol") + commits = find_commits(feature, opts.diff) + if commits: + for commit in commits: + commit = commit.split(" ", 1) + print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1]) + else: + print "\t- no commit found" + print # new line def yel(string): @@ -190,7 +225,7 @@ def find_commits(symbol, diff): """Find commits changing %symbol in the given range of %diff.""" commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s" % (symbol, diff)) - return commits + return [x for x in commits.split("\n") if x] def tree_is_dirty(): @@ -209,43 +244,107 @@ def get_head(): return stdout.strip('\n') -def check_symbols(ignore): - """Find undefined Kconfig symbols and return a dict with the symbol as key - and a list of referencing files as value. 
Files matching %ignore are not - checked for undefined symbols.""" - source_files = [] - kconfig_files = [] - defined_features = set() - referenced_features = dict() # {feature: [files]} +def partition(lst, size): + """Partition list @lst into eveni-sized lists of size @size.""" + return [lst[i::size] for i in xrange(size)] + + +def init_worker(): + """Set signal handler to ignore SIGINT.""" + signal.signal(signal.SIGINT, signal.SIG_IGN) + + +def find_sims(symbol, ignore, defined = []): + """Return a list of max. ten Kconfig symbols that are string-similar to + @symbol.""" + if defined: + return sorted(difflib.get_close_matches(symbol, set(defined), 10)) + + pool = Pool(cpu_count(), init_worker) + kfiles = [] + for gitfile in get_files(): + if REGEX_FILE_KCONFIG.match(gitfile): + kfiles.append(gitfile) + arglist = [] + for part in partition(kfiles, cpu_count()): + arglist.append((part, ignore)) + + for res in pool.map(parse_kconfig_files, arglist): + defined.extend(res[0]) + + return sorted(difflib.get_close_matches(symbol, set(defined), 10)) + + +def get_files(): + """Return a list of all files in the current git directory.""" # use 'git ls-files' to get the worklist stdout = execute("git ls-files") if len(stdout) > 0 and stdout[-1] == "\n": stdout = stdout[:-1] + files = [] for gitfile in stdout.rsplit("\n"): if ".git" in gitfile or "ChangeLog" in gitfile or \ ".log" in gitfile or os.path.isdir(gitfile) or \ gitfile.startswith("tools/"): continue + files.append(gitfile) + return files + + +def check_symbols(ignore): + """Find undefined Kconfig symbols and return a dict with the symbol as key + and a list of referencing files as value. Files matching %ignore are not + checked for undefined symbols.""" + pool = Pool(cpu_count(), init_worker) + try: + return check_symbols_helper(pool, ignore) + except KeyboardInterrupt: + pool.terminate() + pool.join() + sys.exit(1) + + +def check_symbols_helper(pool, ignore): + """Helper method for check_symbols(). 
Used to catch keyboard interrupts in + check_symbols() in order to properly terminate running worker processes.""" + source_files = [] + kconfig_files = [] + defined_features = [] + referenced_features = dict() # {file: [features]} + + for gitfile in get_files(): if REGEX_FILE_KCONFIG.match(gitfile): kconfig_files.append(gitfile) else: - # all non-Kconfig files are checked for consistency + if ignore and not re.match(ignore, gitfile): + continue + # add source files that do not match the ignore pattern source_files.append(gitfile) - for sfile in source_files: - if ignore and re.match(ignore, sfile): - # do not check files matching %ignore - continue - parse_source_file(sfile, referenced_features) + # parse source files + arglist = partition(source_files, cpu_count()) + for res in pool.map(parse_source_files, arglist): + referenced_features.update(res) - for kfile in kconfig_files: - if ignore and re.match(ignore, kfile): - # do not collect references for files matching %ignore - parse_kconfig_file(kfile, defined_features, dict()) - else: - parse_kconfig_file(kfile, defined_features, referenced_features) + + # parse kconfig files + arglist = [] + for part in partition(kconfig_files, cpu_count()): + arglist.append((part, ignore)) + for res in pool.map(parse_kconfig_files, arglist): + defined_features.extend(res[0]) + referenced_features.update(res[1]) + defined_features = set(defined_features) + + # inverse mapping of referenced_features to dict(feature: [files]) + inv_map = dict() + for _file, features in referenced_features.iteritems(): + for feature in features: + inv_map[feature] = inv_map.get(feature, set()) + inv_map[feature].add(_file) + referenced_features = inv_map undefined = {} # {feature: [files]} for feature in sorted(referenced_features): @@ -259,12 +358,26 @@ def check_symbols(ignore): if feature[:-len("_MODULE")] in defined_features: continue undefined[feature] = referenced_features.get(feature) - return undefined + return undefined, defined_features -def parse_source_file(sfile, referenced_features): - """Parse @sfile for referenced Kconfig features.""" +def parse_source_files(source_files): + """Parse each source file in @source_files and return dictionary with source + files as keys and lists of references Kconfig symbols as values.""" + referenced_features = dict() + for sfile in source_files: + referenced_features[sfile] = parse_source_file(sfile) + return referenced_features + + +def parse_source_file(sfile): + """Parse @sfile and return a list of referenced Kconfig features.""" lines = [] + references = [] + + if not os.path.exists(sfile): + return references + with open(sfile, "r") as stream: lines = stream.readlines() @@ -275,9 +388,9 @@ def parse_source_file(sfile, referenced_features): for feature in features: if not REGEX_FILTER_FEATURES.search(feature): continue - sfiles = referenced_features.get(feature, set()) - sfiles.add(sfile) - referenced_features[feature] = sfiles + references.append(feature) + + return references def get_features_in_line(line): @@ -285,11 +398,35 @@ def get_features_in_line(line): return REGEX_FEATURE.findall(line) -def parse_kconfig_file(kfile, defined_features, referenced_features): +def parse_kconfig_files(args): + """Parse kconfig files and return tuple of defined and references Kconfig + symbols. 
Note, @args is a tuple of a list of files and the @ignore + pattern.""" + kconfig_files = args[0] + ignore = args[1] + defined_features = [] + referenced_features = dict() + + for kfile in kconfig_files: + defined, references = parse_kconfig_file(kfile) + defined_features.extend(defined) + if ignore and re.match(ignore, kfile): + # do not collect references for files that match the ignore pattern + continue + referenced_features[kfile] = references + return (defined_features, referenced_features) + + +def parse_kconfig_file(kfile): """Parse @kfile and update feature definitions and references.""" lines = [] + defined = [] + references = [] skip = False + if not os.path.exists(kfile): + return defined, references + with open(kfile, "r") as stream: lines = stream.readlines() @@ -300,7 +437,7 @@ def parse_kconfig_file(kfile, defined_features, referenced_features): if REGEX_KCONFIG_DEF.match(line): feature_def = REGEX_KCONFIG_DEF.findall(line) - defined_features.add(feature_def[0]) + defined.append(feature_def[0]) skip = False elif REGEX_KCONFIG_HELP.match(line): skip = True @@ -308,6 +445,7 @@ def parse_kconfig_file(kfile, defined_features, referenced_features): # ignore content of help messages pass elif REGEX_KCONFIG_STMT.match(line): + line = REGEX_QUOTES.sub("", line) features = get_features_in_line(line) # multi-line statements while line.endswith("\\"): @@ -319,9 +457,9 @@ def parse_kconfig_file(kfile, defined_features, referenced_features): if REGEX_NUMERIC.match(feature): # ignore numeric values continue - paths = referenced_features.get(feature, set()) - paths.add(kfile) - referenced_features[feature] = paths + references.append(feature) + + return defined, references if __name__ == "__main__": diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 5b96206e9aab..8adca4406198 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -917,7 +917,7 @@ static int do_vmbus_entry(const char *filename, void *symval, char guid_name[(sizeof(*guid) + 1) * 2]; for (i = 0; i < (sizeof(*guid) * 2); i += 2) - sprintf(&guid_name[i], "%02x", TO_NATIVE((*guid)[i/2])); + sprintf(&guid_name[i], "%02x", TO_NATIVE((guid->b)[i/2])); strcpy(alias, "vmbus:"); strcat(alias, guid_name); diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c index 5480e4e424eb..fdc9ca4c0356 100644 --- a/tools/hv/hv_fcopy_daemon.c +++ b/tools/hv/hv_fcopy_daemon.c @@ -37,12 +37,14 @@ static int target_fd; static char target_fname[W_MAX_PATH]; +static unsigned long long filesize; static int hv_start_fcopy(struct hv_start_fcopy *smsg) { int error = HV_E_FAIL; char *q, *p; + filesize = 0; p = (char *)smsg->path_name; snprintf(target_fname, sizeof(target_fname), "%s/%s", (char *)smsg->path_name, (char *)smsg->file_name); @@ -98,14 +100,26 @@ done: static int hv_copy_data(struct hv_do_fcopy *cpmsg) { ssize_t bytes_written; + int ret = 0; bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size, cpmsg->offset); - if (bytes_written != cpmsg->size) - return HV_E_FAIL; + filesize += cpmsg->size; + if (bytes_written != cpmsg->size) { + switch (errno) { + case ENOSPC: + ret = HV_ERROR_DISK_FULL; + break; + default: + ret = HV_E_FAIL; + break; + } + syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)", + filesize, (long)bytes_written, strerror(errno)); + } - return 0; + return ret; } static int hv_copy_finished(void) @@ -165,7 +179,7 @@ int main(int argc, char *argv[]) } openlog("HV_FCOPY", 0, LOG_USER); - syslog(LOG_INFO, "HV_FCOPY starting; pid is:%d", getpid()); + syslog(LOG_INFO, 
"starting; pid is:%d", getpid()); fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR); @@ -201,7 +215,7 @@ int main(int argc, char *argv[]) } kernel_modver = *(__u32 *)buffer; in_handshake = 0; - syslog(LOG_INFO, "HV_FCOPY: kernel module version: %d", + syslog(LOG_INFO, "kernel module version: %d", kernel_modver); continue; } diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c index 96234b638249..5d51d6ff08e6 100644 --- a/tools/hv/hv_vss_daemon.c +++ b/tools/hv/hv_vss_daemon.c @@ -254,7 +254,7 @@ int main(int argc, char *argv[]) syslog(LOG_ERR, "Illegal op:%d\n", op); } vss_msg->error = error; - len = write(vss_fd, &error, sizeof(struct hv_vss_msg)); + len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg)); if (len != sizeof(struct hv_vss_msg)) { syslog(LOG_ERR, "write failed; error: %d %s", errno, strerror(errno)); |