author    Linus Torvalds <torvalds@linux-foundation.org>  2021-09-17 08:31:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-09-17 08:31:49 -0700
commit    c6460daea23dcd160f2dc497c64b4c882ea1de69 (patch)
tree      dc8249def0fae1dceb6fb1bb92c6ec245c1c861f /drivers/xen
parent    bdb575f872175ed0ecf2638369da1cb7a6e86a14 (diff)
parent    d859ed25b24289c87a97889653596f8088367e16 (diff)
Merge tag 'for-linus-5.15b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen fixes from Juergen Gross:

 - The first batch of a Xen swiotlb fixup series fixing multiple minor
   issues and doing some small cleanups

 - Some further Xen related fixes avoiding WARN() splats when running
   as Xen guests or dom0

 - A Kconfig fix allowing the pvcalls backend to be built as a module

* tag 'for-linus-5.15b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  swiotlb-xen: drop DEFAULT_NSLABS
  swiotlb-xen: arrange to have buffer info logged
  swiotlb-xen: drop leftover __ref
  swiotlb-xen: limit init retries
  swiotlb-xen: suppress certain init retries
  swiotlb-xen: maintain slab count properly
  swiotlb-xen: fix late init retry
  swiotlb-xen: avoid double free
  xen/pvcalls: backend can be a module
  xen: fix usage of pmd_populate in mremap for pv guests
  xen: reset legacy rtc flag for PV domU
  PM: base: power: don't try to use non-existing RTC for storing data
  xen/balloon: use a kernel thread instead a workqueue
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/Kconfig        2
-rw-r--r--  drivers/xen/balloon.c     62
-rw-r--r--  drivers/xen/swiotlb-xen.c 37
3 files changed, 63 insertions(+), 38 deletions(-)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5f1ce59b44b9..a37eb52fb401 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND
implements them.
config XEN_PVCALLS_BACKEND
- bool "XEN PV Calls backend driver"
+ tristate "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
help
Experimental backend for the Xen PV Calls protocol
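[Editor's note: switching the Kconfig symbol from bool to tristate lets the pvcalls backend be built as a loadable module, which requires the usual module entry/exit boilerplate. A minimal sketch of that pattern follows; the names here are hypothetical and are not the actual pvcalls-back.c entry points.]

/* Minimal kernel-module boilerplate a tristate driver relies on. */
#include <linux/init.h>
#include <linux/module.h>

static int __init example_backend_init(void)
{
	pr_info("example backend loaded\n");
	return 0;
}

static void __exit example_backend_exit(void)
{
	pr_info("example backend unloaded\n");
}

module_init(example_backend_init);
module_exit(example_backend_exit);
MODULE_LICENSE("GPL");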
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 671c71245a7b..2d2803883306 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -43,6 +43,8 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
/*
- * balloon_process() state:
+ * balloon_thread() state:
*
* BP_DONE: done or nothing to do,
* BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
BP_ECANCELED
};
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
static DEFINE_MUTEX(balloon_mutex);
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
if (val == MEM_ONLINE)
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
return NOTIFY_OK;
}
@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
/*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
+ * needed, or if the credit has changed while state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+ if (state != BP_EAGAIN)
+ credit = 0;
+
+ return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
*/
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
{
enum bp_state state = BP_DONE;
long credit;
+ unsigned long timeout;
+
+ set_freezable();
+ for (;;) {
+ if (state == BP_EAGAIN)
+ timeout = balloon_stats.schedule_delay * HZ;
+ else
+ timeout = 3600 * HZ;
+ credit = current_credit();
+ wait_event_interruptible_timeout(balloon_thread_wq,
+ balloon_thread_cond(state, credit), timeout);
+
+ if (kthread_should_stop())
+ return 0;
- do {
mutex_lock(&balloon_mutex);
credit = current_credit();
@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
mutex_unlock(&balloon_mutex);
cond_resched();
-
- } while (credit && state == BP_DONE);
-
- /* Schedule more work if there is some still to be done. */
- if (state == BP_EAGAIN)
- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+ }
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
/* The balloon may be too large now. Shrink it if needed. */
if (current_credit())
- schedule_delayed_work(&balloon_worker, 0);
+ wake_up(&balloon_thread_wq);
mutex_unlock(&balloon_mutex);
}
@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
static int __init balloon_init(void)
{
+ struct task_struct *task;
+
if (!xen_domain())
return -ENODEV;
@@ -722,6 +744,12 @@ static int __init balloon_init(void)
}
#endif
+ task = kthread_run(balloon_thread, NULL, "xen-balloon");
+ if (IS_ERR(task)) {
+ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+ return PTR_ERR(task);
+ }
+
/* Init the xen-balloon driver. */
xen_balloon_init();
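[Editor's note: the balloon.c change above replaces a delayed workqueue item with a dedicated freezable kthread that sleeps on a wait queue, so callers kick it with wake_up() instead of schedule_delayed_work(). A minimal sketch of that pattern, assuming a hypothetical pending flag in place of the driver's credit/state logic, with locking omitted for brevity:]

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_pending;	/* real code needs proper synchronization */

static int example_thread(void *unused)
{
	set_freezable();
	for (;;) {
		/* Sleep until kicked, stopped, or the timeout elapses. */
		wait_event_interruptible_timeout(example_wq,
			example_pending || kthread_should_stop(),
			3600 * HZ);
		if (kthread_should_stop())
			return 0;
		example_pending = false;
		/* ... perform the deferred work here ... */
	}
}

/* Callers that used to schedule_delayed_work() now just do: */
static void example_kick(void)
{
	example_pending = true;
	wake_up(&example_wq);
}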
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 643fe440c46e..8c10edf9efe6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
- int i, rc;
- int dma_bits;
+ int rc;
+ unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+ unsigned int i, dma_bits = order + PAGE_SHIFT;
dma_addr_t dma_handle;
phys_addr_t p = virt_to_phys(buf);
- dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+ BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+ BUG_ON(nslabs % IO_TLB_SEGSIZE);
i = 0;
do {
- int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
do {
rc = xen_create_contiguous_region(
- p + (i << IO_TLB_SHIFT),
- get_order(slabs << IO_TLB_SHIFT),
+ p + (i << IO_TLB_SHIFT), order,
dma_bits, &dma_handle);
} while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
- i += slabs;
+ i += IO_TLB_SEGSIZE;
} while (i < nslabs);
return 0;
}
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
return "";
}
-#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
{
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
order--;
}
if (!start)
- goto error;
+ goto exit;
if (order != get_order(bytes)) {
pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
(PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
swiotlb_set_max_segment(PAGE_SIZE);
return 0;
error:
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
- pr_info("Lowering to %luMB\n",
- (nslabs << IO_TLB_SHIFT) >> 20);
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+ bytes = nslabs << IO_TLB_SHIFT;
+ pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
}
+exit:
pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
- free_pages((unsigned long)start, order);
return rc;
}
@@ -244,9 +241,9 @@ retry:
rc = xen_swiotlb_fixup(start, nslabs);
if (rc) {
memblock_free(__pa(start), PAGE_ALIGN(bytes));
- if (repeat--) {
+ if (nslabs > 1024 && repeat--) {
/* Min is 2MB */
- nslabs = max(1024UL, (nslabs >> 1));
+ nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
bytes = nslabs << IO_TLB_SHIFT;
pr_info("Lowering to %luMB\n", bytes >> 20);
goto retry;
@@ -254,7 +251,7 @@ retry:
panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
}
- if (swiotlb_init_with_tbl(start, nslabs, false))
+ if (swiotlb_init_with_tbl(start, nslabs, true))
panic("Cannot allocate SWIOTLB buffer");
swiotlb_set_max_segment(PAGE_SIZE);
}
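[Editor's note: the retry changes above halve nslabs on each failed attempt while keeping it a multiple of IO_TLB_SEGSIZE and never dropping below 1024 slabs (2MB with 2KB slabs). A standalone userspace sketch of that sizing arithmetic, with the kernel constants redefined locally so it compiles on its own:]

#include <stdio.h>

#define IO_TLB_SHIFT   11		/* 2KB per slab */
#define IO_TLB_SEGSIZE 128
#define ALIGN(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define MAX(a, b)      ((a) > (b) ? (a) : (b))

int main(void)
{
	/* 64MB default buffer, expressed in slabs. */
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;

	while (nslabs > 1024) {
		/* Halve, realign to the segment size, clamp at the 2MB floor. */
		nslabs = MAX(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
		printf("retry with %lu slabs = %luMB\n",
		       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
	}
	return 0;
}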