path: root/drivers/infiniband/sw/rxe/rxe_task.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"

static struct workqueue_struct *rxe_wq;

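/* create the shared unbound work queue used to run all rxe tasks */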
int rxe_alloc_wq(void)
{
	rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
	if (!rxe_wq)
		return -ENOMEM;

	return 0;
}

void rxe_destroy_wq(void)
{
	destroy_workqueue(rxe_wq);
}
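
/* A task moves through the TASK_STATE_* states as follows:
 * rxe_run_task() and rxe_sched_task() move an idle task to busy,
 * and a second schedule while the task is busy raises it to armed
 * so that do_task() makes another pass. do_task() returns the task
 * to idle when the work is done or the iteration limit is hit.
 * rxe_cleanup_task() and rxe_disable_task() move an active task to
 * draining and wait for do_task() to mark it drained; a cleaned up
 * task ends up invalid.
 */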

/* Check if the task is idle, i.e. not running, not scheduled in
 * the work queue and not draining. If so, reserve a slot in
 * do_task() by setting the state to busy and taking a qp
 * reference to cover the gap from now until the task finishes.
 * The state moves out of busy when the task function returns a
 * non-zero value in do_task(). If the state is already busy it is
 * raised to armed to indicate to do_task() that an additional
 * pass should be made over the task.
 * Context: caller should hold task->lock.
 * Returns: true if state transitioned from idle to busy else false.
 */
static bool __reserve_if_idle(struct rxe_task *task)
{
	WARN_ON(rxe_read(task->qp) <= 0);

	if (task->state == TASK_STATE_IDLE) {
		rxe_get(task->qp);
		task->state = TASK_STATE_BUSY;
		task->num_sched++;
		return true;
	}

	if (task->state == TASK_STATE_BUSY)
		task->state = TASK_STATE_ARMED;

	return false;
}

/* Check if the task is idle or drained and not currently
 * scheduled in the work queue. This routine is
 * called by rxe_cleanup_task or rxe_disable_task to
 * see if the queue is empty.
 * Context: caller should hold task->lock.
 * Returns: true if done, else false.
 */
static bool __is_done(struct rxe_task *task)
{
	if (work_pending(&task->work))
		return false;

	if (task->state == TASK_STATE_IDLE ||
	    task->state == TASK_STATE_DRAINED) {
		return true;
	}

	return false;
}

/* a locked version of __is_done */
static bool is_done(struct rxe_task *task)
{
	unsigned long flags;
	int done;

	spin_lock_irqsave(&task->lock, flags);
	done = __is_done(task);
	spin_unlock_irqrestore(&task->lock, flags);

	return done;
}

/* do_task is a wrapper for the three tasks (requester,
 * completer, responder) and calls them in a loop until
 * they return a non-zero value. It is called either
 * directly by rxe_run_task or indirectly when rxe_sched_task
 * schedules the task. Both must call __reserve_if_idle to
 * move the task to busy before calling or scheduling it.
 * The task can also be moved to drained or invalid
 * by calls to rxe_cleanup_task or rxe_disable_task.
 * In that case tasks which get here are not executed but
 * just flushed. Each task function is designed to check
 * whether there is work to do, perform part of it, and then
 * return zero; once all the work has been consumed it returns
 * a non-zero value.
 * The number of iterations in one pass is limited by
 * RXE_MAX_ITERATIONS so one task cannot hold the cpu forever.
 * If the limit is hit and work remains the task is rescheduled.
 */
static void do_task(struct rxe_task *task)
{
	unsigned int iterations;
	unsigned long flags;
	int resched = 0;
	int cont;
	int ret;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
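	/* drained and invalid tasks are not run, just flushed;
	 * drop the qp reference taken in __reserve_if_idle()
	 */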
	if (task->state >= TASK_STATE_DRAINED) {
		rxe_put(task->qp);
		task->num_done++;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	do {
		iterations = RXE_MAX_ITERATIONS;
		cont = 0;

		do {
			ret = task->func(task->qp);
		} while (ret == 0 && iterations-- > 0);

		spin_lock_irqsave(&task->lock, flags);
		/* we're not done yet but we ran out of iterations.
		 * yield the cpu and reschedule the task
		 */
		if (!ret) {
			task->state = TASK_STATE_IDLE;
			resched = 1;
			goto exit;
		}

		switch (task->state) {
		case TASK_STATE_BUSY:
			task->state = TASK_STATE_IDLE;
			break;

		/* someone tried to schedule the task while we
		 * were running, keep going
		 */
		case TASK_STATE_ARMED:
			task->state = TASK_STATE_BUSY;
			cont = 1;
			break;

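		/* rxe_cleanup_task or rxe_disable_task is waiting
		 * for this task to finish, mark it drained
		 */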
		case TASK_STATE_DRAINING:
			task->state = TASK_STATE_DRAINED;
			break;

		default:
			WARN_ON(1);
			rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
				   task->state);
			task->state = TASK_STATE_IDLE;
		}

exit:
		if (!cont) {
			task->num_done++;
			if (WARN_ON(task->num_done != task->num_sched))
				rxe_dbg_qp(
					task->qp,
					"%ld tasks scheduled, %ld tasks done\n",
					task->num_sched, task->num_done);
		}
		spin_unlock_irqrestore(&task->lock, flags);
	} while (cont);

	task->ret = ret;

	if (resched)
		rxe_sched_task(task);

	rxe_put(task->qp);
}

/* wrapper around do_task to fix argument for work queue */
static void do_work(struct work_struct *work)
{
	do_task(container_of(work, struct rxe_task, work));
}

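/* initialize a task in the idle state and bind it to the
 * work function it will run on behalf of the qp
 */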
int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
		  int (*func)(struct rxe_qp *))
{
	WARN_ON(rxe_read(qp) <= 0);

	task->qp = qp;
	task->func = func;
	task->state = TASK_STATE_IDLE;
	spin_lock_init(&task->lock);
	INIT_WORK(&task->work, do_work);

	return 0;
}

/* rxe_cleanup_task is only called from rxe_do_qp_cleanup in
 * process context. The qp is already completed with no
 * remaining references. Once the work queue is drained the
 * task is moved to the invalid state and this routine returns.
 * The qp cleanup code then calls the task functions directly
 * without using the task struct to drain any late-arriving
 * packets or work requests.
 */
void rxe_cleanup_task(struct rxe_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_INVALID;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	/* now the task cannot be scheduled or run; just wait
	 * for the previously scheduled tasks to finish.
	 */
	while (!is_done(task))
		cond_resched();

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_INVALID;
	spin_unlock_irqrestore(&task->lock, flags);
}

/* run the task inline if it is currently idle.
 * do_task() cannot be called while holding the lock.
 */
void rxe_run_task(struct rxe_task *task)
{
	unsigned long flags;
	bool run;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	run = __reserve_if_idle(task);
	spin_unlock_irqrestore(&task->lock, flags);

	if (run)
		do_task(task);
}

/* schedule the task to run later as a work queue entry.
 * unlike do_task, queue_work can be called while
 * holding the lock.
 */
void rxe_sched_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (__reserve_if_idle(task))
		queue_work(rxe_wq, &task->work);
	spin_unlock_irqrestore(&task->lock, flags);
}

/* rxe_disable_task and rxe_enable_task are only called from
 * rxe_modify_qp in process context. The task is moved
 * to the drained state by do_task.
 */
void rxe_disable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
		task->state = TASK_STATE_DRAINING;
	} else {
		task->state = TASK_STATE_DRAINED;
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&task->lock, flags);

	while (!is_done(task))
		cond_resched();

	spin_lock_irqsave(&task->lock, flags);
	task->state = TASK_STATE_DRAINED;
	spin_unlock_irqrestore(&task->lock, flags);
}

void rxe_enable_task(struct rxe_task *task)
{
	unsigned long flags;

	WARN_ON(rxe_read(task->qp) <= 0);

	spin_lock_irqsave(&task->lock, flags);
	if (task->state == TASK_STATE_INVALID) {
		spin_unlock_irqrestore(&task->lock, flags);
		return;
	}

	task->state = TASK_STATE_IDLE;
	spin_unlock_irqrestore(&task->lock, flags);
}