author    Linus Torvalds <torvalds@linux-foundation.org>  2018-06-28 09:43:44 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-06-28 10:40:47 -0700
commit    a11e1d432b51f63ba698d044441284a661f01144
tree      9f3c5a10bf0d7f9a342d5fb39c0c35ea14170124  /net/tls/tls_sw.c
parent    f57494321cbf5b1e7769b6135407d2995a369e28
Revert changes to convert to ->poll_mask() and aio IOCB_CMD_POLL
The poll() changes were not well thought out, and completely unexplained. They also caused a huge performance regression, because "->poll()" was no longer a trivial file operation that just called down to the underlying file operations, but instead did at least two indirect calls. Indirect calls are sadly slow now with the Spectre mitigation, but the performance problem could at least be largely mitigated by changing the "->get_poll_head()" operation to just have a per-file-descriptor pointer to the poll head instead. That gets rid of one of the new indirections.

But that doesn't fix the new complexity that is completely unwarranted for the regular case. The (undocumented) reason for the poll() changes was some alleged AIO poll race fixing, but we don't make the common case slower and more complex for some uncommon special case, so this all really needs way more explanations and most likely a fundamental redesign.

[ This revert is a revert of about 30 different commits, not reverted
  individually because that would just be unnecessarily messy  - Linus ]

Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
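For readers who did not follow the series being reverted: at the socket (proto_ops) level the two interfaces differ roughly as sketched below. The signatures are lifted from the hunk further down; the ->get_poll_head() hook mentioned above is the separate file_operations entry that supplied the wait-queue head, i.e. the extra indirection. This is an illustrative comparison, not a quote from the kernel headers.

    /* Classic interface, restored by this revert: a single callback both
     * reports readiness and registers the caller on the socket's wait
     * queue through the poll_table. */
    unsigned int (*poll)(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait);

    /* Interface being reverted: readiness reporting was split out into
     * ->poll_mask(), with the wait-queue head obtained through the
     * separate ->get_poll_head() hook, so a plain poll()/select() went
     * through two indirect calls instead of one. */
    __poll_t (*poll_mask)(struct socket *sock, __poll_t events);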
Diffstat (limited to 'net/tls/tls_sw.c')
-rw-r--r--  net/tls/tls_sw.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f127fac88acf..d2380548f8f6 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -919,22 +919,23 @@ splice_read_end:
 	return copied ? : err;
 }
 
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+			 struct poll_table_struct *wait)
 {
+	unsigned int ret;
 	struct sock *sk = sock->sk;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-	__poll_t mask;
 
-	/* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
-	mask = ctx->sk_poll_mask(sock, events);
+	/* Grab POLLOUT and POLLHUP from the underlying socket */
+	ret = ctx->sk_poll(file, sock, wait);
 
-	/* Clear EPOLLIN bits, and set based on recv_pkt */
-	mask &= ~(EPOLLIN | EPOLLRDNORM);
+	/* Clear POLLIN bits, and set based on recv_pkt */
+	ret &= ~(POLLIN | POLLRDNORM);
 	if (ctx->recv_pkt)
-		mask |= EPOLLIN | EPOLLRDNORM;
+		ret |= POLLIN | POLLRDNORM;
 
-	return mask;
+	return ret;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1191,7 +1192,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		sk->sk_data_ready = tls_data_ready;
 		write_unlock_bh(&sk->sk_callback_lock);
 
-		sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
 
 		strp_check_rcv(&sw_ctx_rx->strp);
 	}
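The sk_poll member that the second hunk assigns belongs to struct tls_sw_context_rx, declared in include/net/tls.h and therefore outside this per-file view. A rough sketch of the shape implied by the hunks above (not a verbatim quote of the header):

    struct tls_sw_context_rx {
            /* ... strparser and crypto state ... */

            /* Saved ->poll of the underlying socket: tls_sw_poll() delegates
             * to it for POLLOUT/POLLHUP and then rewrites the POLLIN bits
             * according to whether a decrypted record (recv_pkt) is ready. */
            unsigned int (*sk_poll)(struct file *file, struct socket *sock,
                                    struct poll_table_struct *wait);
            /* ... */
    };

Saving the underlying callback and wrapping it this way lets the TLS layer adjust only the readable state while inheriting the TCP socket's writable and hangup handling, which is exactly what the restored tls_sw_poll() does.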