Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/channel.c       93
-rw-r--r--  drivers/hv/channel_mgmt.c   2
-rw-r--r--  drivers/hv/hv_balloon.c    44
-rw-r--r--  drivers/hv/hv_snapshot.c   33
-rw-r--r--  drivers/hv/hv_util.c        9
-rw-r--r--  drivers/hv/hyperv_vmbus.h  12
-rw-r--r--  drivers/hv/ring_buffer.c   44
7 files changed, 113 insertions, 124 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8490fe..5fb4c6d9209b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
* vmbus_setevent- Trigger an event notification on the specified
* channel.
*/
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
vmbus_set_event(channel);
}
}
+EXPORT_SYMBOL_GPL(vmbus_setevent);
/*
* vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- int ret;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- * NOTE: in this case, the hvsock channel is an exception, because
- * it looks the host side's hvsock implementation has a throttling
- * mechanism which can hurt the performance otherwise.
- */
-
- if (((ret == 0) && kick_q && signal) ||
- (ret && !is_hvsock_channel(channel)))
- vmbus_setevent(channel);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+ lock, kick_q);
- return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 flags,
bool kick_q)
{
- int ret;
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- */
-
- if (((ret == 0) && kick_q && signal) || (ret))
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
u32 desc_size,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *multi_pagebuffer,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
struct vmbus_channel_packet_multipage_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
bool raw)
{
- int ret;
- bool signal = false;
+ return hv_ringbuffer_read(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, raw);
- ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
- buffer_actual_len, requestid, &signal, raw);
-
- if (signal)
- vmbus_setevent(channel);
-
- return ret;
}
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
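
The net effect of the channel.c changes above: send-path callers no longer track a "signal" out-parameter or call vmbus_setevent() themselves, because the ring-buffer layer now signals the host as part of the write. A minimal sketch of a post-patch send, assuming a hypothetical channel and payload (not code from the patch):

/* Sketch only: a hypothetical caller after this patch. The channel,
 * payload, and request id are placeholders. */
static int example_send(struct vmbus_channel *chan, void *payload,
			u32 len, u64 req_id)
{
	/* Host signaling now happens inside hv_ringbuffer_write(), so
	 * the caller only checks the return value. */
	return vmbus_sendpacket(chan, payload, len, req_id,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}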
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 96a85cd39580..cbb96f2f0d1a 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -447,8 +447,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
}
dev_type = hv_get_dev_type(newchannel);
- if (dev_type == HV_NIC)
- set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
init_vp_index(newchannel, dev_type);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fdf8da929cbe..14c3dc4bd23c 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -564,6 +564,11 @@ struct hv_dynmem_device {
* next version to try.
*/
__u32 next_version;
+
+ /*
+ * The negotiated version agreed by host.
+ */
+ __u32 version;
};
static struct hv_dynmem_device dm_device;
@@ -645,6 +650,7 @@ static void hv_bring_pgs_online(struct hv_hotadd_state *has,
{
int i;
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
for (i = 0; i < size; i++)
hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
@@ -685,7 +691,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
(HA_CHUNK << PAGE_SHIFT));
if (ret) {
- pr_info("hot_add memory failed error is %d\n", ret);
+ pr_warn("hot_add memory failed error is %d\n", ret);
if (ret == -EEXIST) {
/*
* This error indicates that the error
@@ -814,6 +820,9 @@ static unsigned long handle_pg_range(unsigned long pg_start,
unsigned long old_covered_state;
unsigned long res = 0, flags;
+ pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
+ pg_start);
+
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
@@ -1025,8 +1034,13 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
- pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
- pr_info("Data Size is %d\n", info_hdr->data_size);
+ if (info_hdr->data_size == sizeof(__u64)) {
+ __u64 *max_page_count = (__u64 *)&info_hdr[1];
+
+ pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
+ *max_page_count);
+ }
+
break;
default:
pr_info("Received Unknown type: %d\n", info_hdr->type);
@@ -1196,8 +1210,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
return num_pages;
}
-
-
static void balloon_up(struct work_struct *dummy)
{
unsigned int num_pages = dm_device.balloon_wrk.num_pages;
@@ -1224,6 +1236,10 @@ static void balloon_up(struct work_struct *dummy)
/* Refuse to balloon below the floor, keep the 2M granularity. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+ pr_warn("Balloon request will be partially fulfilled. %s\n",
+ avail_pages < num_pages ? "Not enough memory." :
+ "Balloon floor reached.");
+
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages -= num_pages % PAGES_IN_2M;
}
@@ -1245,6 +1261,9 @@ static void balloon_up(struct work_struct *dummy)
}
if (num_ballooned == 0 || num_ballooned == num_pages) {
+ pr_debug("Ballooned %u out of %u requested pages.\n",
+ num_pages, dm_device.balloon_wrk.num_pages);
+
bl_resp->more_pages = 0;
done = true;
dm_device.state = DM_INITIALIZED;
@@ -1292,12 +1311,16 @@ static void balloon_down(struct hv_dynmem_device *dm,
int range_count = req->range_count;
struct dm_unballoon_response resp;
int i;
+ unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
for (i = 0; i < range_count; i++) {
free_balloon_pages(dm, &range_array[i]);
complete(&dm_device.config_event);
}
+ pr_debug("Freed %u ballooned pages.\n",
+ prev_pages_ballooned - dm->num_pages_ballooned);
+
if (req->more_pages == 1)
return;
@@ -1365,6 +1388,7 @@ static void version_resp(struct hv_dynmem_device *dm,
version_req.hdr.size = sizeof(struct dm_version_request);
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = dm->next_version;
+ dm->version = version_req.version.version;
/*
* Set the next version to try in case current version fails.
@@ -1501,7 +1525,11 @@ static int balloon_probe(struct hv_device *dev,
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
+#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
+#else
+ do_hot_add = false;
+#endif
/*
* First allocate a send buffer.
@@ -1553,6 +1581,7 @@ static int balloon_probe(struct hv_device *dev,
version_req.hdr.trans_id = atomic_inc_return(&trans_id);
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
version_req.is_last_attempt = 0;
+ dm_device.version = version_req.version.version;
ret = vmbus_sendpacket(dev->channel, &version_req,
sizeof(struct dm_version_request),
@@ -1575,6 +1604,11 @@ static int balloon_probe(struct hv_device *dev,
ret = -ETIMEDOUT;
goto probe_error2;
}
+
+ pr_info("Using Dynamic Memory protocol version %u.%u\n",
+ DYNMEM_MAJOR_VERSION(dm_device.version),
+ DYNMEM_MINOR_VERSION(dm_device.version));
+
/*
* Now submit our capabilities to the host.
*/
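
The probe path now records the protocol version it offers (dm_device.version) and logs it once negotiation completes. The DYNMEM version macros in hv_balloon.c pack major and minor into a single __u32, with the major number in the upper 16 bits; a small illustration, assuming those existing macro definitions:

/* Illustration of the version encoding behind the pr_info() above;
 * DYNMEM_MAKE_VERSION/MAJOR/MINOR are the existing hv_balloon.c macros. */
__u32 ver = DYNMEM_MAKE_VERSION(2, 0);	/* the WIN10-era protocol */

pr_info("negotiated %u.%u\n",
	DYNMEM_MAJOR_VERSION(ver),	/* ver >> 16  -> 2 */
	DYNMEM_MINOR_VERSION(ver));	/* ver & 0xff -> 0 */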
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a6707133c297..eee238cc60bd 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,7 +31,10 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
-#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
+/*
+ * Timeout values are based on expectations from host
+ */
+#define VSS_FREEZE_TIMEOUT (15 * 60)
/*
* Global state maintained for transaction that is being processed. For a class
@@ -120,7 +123,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
default:
return -EINVAL;
}
- pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
+ pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
return 0;
}
@@ -128,8 +131,10 @@ static int vss_on_msg(void *msg, int len)
{
struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
- if (len != sizeof(*vss_msg))
+ if (len != sizeof(*vss_msg)) {
+ pr_debug("VSS: Message size does not match length\n");
return -EINVAL;
+ }
if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
@@ -137,8 +142,11 @@ static int vss_on_msg(void *msg, int len)
* Don't process registration messages if we're in the middle
* of a transaction processing.
*/
- if (vss_transaction.state > HVUTIL_READY)
+ if (vss_transaction.state > HVUTIL_READY) {
+ pr_debug("VSS: Got unexpected registration request\n");
return -EINVAL;
+ }
+
return vss_handle_handshake(vss_msg);
} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
vss_transaction.state = HVUTIL_USERSPACE_RECV;
@@ -155,7 +163,7 @@ static int vss_on_msg(void *msg, int len)
}
} else {
/* This is a spurious call! */
- pr_warn("VSS: Transaction not active\n");
+ pr_debug("VSS: Transaction not active\n");
return -EINVAL;
}
return 0;
@@ -168,8 +176,10 @@ static void vss_send_op(void)
struct hv_vss_msg *vss_msg;
/* The transaction state is wrong. */
- if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+ if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
+ pr_debug("VSS: Unexpected attempt to send to daemon\n");
return;
+ }
vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
if (!vss_msg)
@@ -179,7 +189,8 @@ static void vss_send_op(void)
vss_transaction.state = HVUTIL_USERSPACE_REQ;
- schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT);
+ schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
+ VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
@@ -210,9 +221,13 @@ static void vss_handle_request(struct work_struct *dummy)
case VSS_OP_HOT_BACKUP:
if (vss_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
+ pr_debug("VSS: Not ready for request.\n");
vss_respond_to_host(HV_E_FAIL);
return;
}
+
+ pr_debug("VSS: Received request for op code: %d\n",
+ vss_transaction.msg->vss_hdr.operation);
vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
vss_send_op();
return;
@@ -353,8 +368,10 @@ hv_vss_init(struct hv_util_service *srv)
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
- if (!hvt)
+ if (!hvt) {
+ pr_warn("VSS: Failed to initialize transport\n");
return -EFAULT;
+ }
return 0;
}
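
hv_snapshot.c now distinguishes the freeze timeout (15 minutes, matching what the host tolerates) from the generic HV_UTIL_TIMEOUT used for all other operations. The selection made by the schedule_delayed_work() call above can be read as this helper (a sketch, not part of the patch):

/* Sketch: per-operation daemon timeout in jiffies, equivalent to the
 * expression passed to schedule_delayed_work() above. */
static unsigned long vss_op_timeout(int op)
{
	/* A freeze can legitimately take minutes on the host side;
	 * everything else gets the generic utility-driver timeout. */
	return (op == VSS_OP_FREEZE ? VSS_FREEZE_TIMEOUT
				    : HV_UTIL_TIMEOUT) * HZ;
}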
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index bcd06306f3e8..e7707747f56d 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -389,16 +389,19 @@ static int util_probe(struct hv_device *dev,
ts_srv_version = TS_VERSION_1;
hb_srv_version = HB_VERSION_1;
break;
- case(VERSION_WIN10):
+ case VERSION_WIN7:
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION;
+ ts_srv_version = TS_VERSION_3;
hb_srv_version = HB_VERSION;
break;
+ case VERSION_WIN10:
default:
util_fw_version = UTIL_FW_VERSION;
sd_srv_version = SD_VERSION;
- ts_srv_version = TS_VERSION_3;
+ ts_srv_version = TS_VERSION;
hb_srv_version = HB_VERSION;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442433c8..0675b395ce5c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -38,7 +38,7 @@
/*
* Timeout for guest-host handshake for services.
*/
-#define HV_UTIL_NEGO_TIMEOUT 60
+#define HV_UTIL_NEGO_TIMEOUT 55
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
struct kvec *kv_list,
- u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy);
+ u32 kv_count, bool lock,
+ bool kick_q);
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw);
+ u64 *requestid, bool raw);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da1a61c..cd49cb17eb7f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if the host receives interrupts when it is not
+ * expecting them. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or full to non-full on the
+ * guest-to-host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
- enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+ bool kick_q)
{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
- return false;
-
- /*
- * When the client wants to control signaling,
- * we only honour the host interrupt mask.
- */
- if (policy == HV_SIGNAL_POLICY_EXPLICIT)
- return true;
+ return;
/* check interrupt_mask before read_index */
virt_rmb();
@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/* Get the next write location for the specified ring buffer. */
@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
}
/* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ struct kvec *kv_list, u32 kv_count, bool lock,
+ bool kick_q)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 old_write;
u64 prev_indices = 0;
unsigned long flags = 0;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info, policy);
+ hv_signal_on_write(old_write, channel, kick_q);
return 0;
}
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw)
+ u64 *requestid, bool raw)
{
u32 bytes_avail_toread;
u32 next_read_location = 0;
@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
u32 offset;
u32 packetlen;
int ret = 0;
+ struct hv_ring_buffer_info *inring_info = &channel->inbound;
if (buflen <= 0)
return -EINVAL;
@@ -416,7 +422,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(inring_info);
+ hv_signal_on_read(channel);
return ret;
}
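
The rule described in the comment block added to ring_buffer.c reduces to a single test: signal only when the write made the ring transition from empty to non-empty, i.e. when the host's read cursor still sits at the position where the write began. As a standalone sketch (field names follow struct hv_ring_buffer; this is not the patch's code verbatim):

/* Sketch of the empty-to-non-empty test behind hv_signal_on_write().
 * An empty ring has read_index == write_index, so if the host's
 * read_index still equals the write index we started from, the ring
 * was empty before this write and the host must be signaled. */
static bool ring_went_nonempty(u32 old_write,
			       struct hv_ring_buffer *ring)
{
	return old_write == READ_ONCE(ring->read_index);
}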