author    Stefano Panella <stefano.panella@csr.com>    2008-12-12 13:00:06 +0000
committer David Vrabel <david.vrabel@csr.com>    2008-12-12 13:00:06 +0000
commit    5b37717a23b8e40f6cf7ad85a26ddcf41c171e2c (patch)
tree      3c611f907bc61c6e1900c4092e8f2f1e8eefd907 /drivers/uwb/rsv.c
parent    c35fa3ea1ae8198bd65c2c6e59d9ebd68c115a59 (diff)
uwb: improved MAS allocator and reservation conflict handling
Greatly enhance the MAS allocator:
  - Handle row and column reservations.
  - Permit all the available MAS to be allocated.
  - Follow the WiMedia rules on MAS selection.

Take appropriate action when reservation conflicts are detected:
  - Correctly identify which reservation wins the conflict.
  - Protect alien BP reservations.
  - If an owned reservation loses, resize/move it.
  - Follow the backoff procedure before requesting additional MAS.

When reservations are terminated, move the remaining reservations (if
necessary) so they keep following the MAS allocation rules.

Signed-off-by: Stefano Panella <stefano.panella@csr.com>
Signed-off-by: David Vrabel <david.vrabel@csr.com>
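The backoff procedure mentioned above is, in essence, a binary exponential backoff measured in superframes: each conflict doubles a contention window (up to a maximum), a random wait is drawn from that window, and only after it has elapsed may the device ask for extra MAS again. Below is a minimal user-space sketch of that idea only; the constants, names and the rand()-based draw are illustrative placeholders, not the kernel's API, which (as the diff below shows) uses random32(), a kernel timer and the UWB_DRP_BACKOFF_WIN_MIN/MAX limits.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder values; the kernel uses UWB_DRP_BACKOFF_WIN_MIN/MAX, whose
 * values are not shown in this patch.  Powers of two keep the mask uniform. */
#define BACKOFF_WIN_MIN 16
#define BACKOFF_WIN_MAX 256

struct backoff_win {
        int  window;                  /* current contention window, in superframes */
        int  n;                       /* superframes left to wait before retrying  */
        bool can_reserve_extra_mases; /* cleared while a backoff is in progress    */
};

static void backoff_win_init(struct backoff_win *bow)
{
        bow->window = BACKOFF_WIN_MIN >> 1;  /* first conflict doubles it to the minimum */
        bow->n = 0;
        bow->can_reserve_extra_mases = true;
}

/* Called when a reservation conflict forces this device to back off. */
static void backoff_win_increment(struct backoff_win *bow)
{
        bow->can_reserve_extra_mases = false;

        if ((bow->window << 1) > BACKOFF_WIN_MAX)
                return;                          /* window already at its maximum */

        bow->window <<= 1;
        bow->n = rand() & (bow->window - 1);     /* uniform in [0, window) */
}

/* Called once per elapsed superframe while the backoff is pending. */
static void backoff_win_tick(struct backoff_win *bow)
{
        if (bow->n > 0)
                bow->n--;
        if (bow->n == 0)
                bow->can_reserve_extra_mases = true; /* wait over: extra MAS may be requested */
}

int main(void)
{
        struct backoff_win bow;
        int superframes = 0;

        backoff_win_init(&bow);
        backoff_win_increment(&bow);     /* first conflict */
        backoff_win_increment(&bow);     /* second conflict: window doubles again */

        while (!bow.can_reserve_extra_mases) {
                backoff_win_tick(&bow);
                superframes++;
        }
        printf("window=%d, waited %d superframes\n", bow.window, superframes);
        return 0;
}

The uwb_rsv_backoff_win_increment()/uwb_rsv_backoff_win_timer() pair added by this patch follows the same shape, but drives the wait with mod_timer() over n superframes instead of per-superframe polling.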
Diffstat (limited to 'drivers/uwb/rsv.c')
-rw-r--r--   drivers/uwb/rsv.c   | 482
1 file changed, 382 insertions(+), 100 deletions(-)
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index 1cd84f927540..165aec6a8f97 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -17,20 +17,31 @@
*/
#include <linux/kernel.h>
#include <linux/uwb.h>
+#include <linux/random.h>
#include "uwb-internal.h"
static void uwb_rsv_timer(unsigned long arg);
static const char *rsv_states[] = {
- [UWB_RSV_STATE_NONE] = "none",
- [UWB_RSV_STATE_O_INITIATED] = "initiated",
- [UWB_RSV_STATE_O_PENDING] = "pending",
- [UWB_RSV_STATE_O_MODIFIED] = "modified",
- [UWB_RSV_STATE_O_ESTABLISHED] = "established",
- [UWB_RSV_STATE_T_ACCEPTED] = "accepted",
- [UWB_RSV_STATE_T_DENIED] = "denied",
- [UWB_RSV_STATE_T_PENDING] = "pending",
+ [UWB_RSV_STATE_NONE] = "none ",
+ [UWB_RSV_STATE_O_INITIATED] = "o initiated ",
+ [UWB_RSV_STATE_O_PENDING] = "o pending ",
+ [UWB_RSV_STATE_O_MODIFIED] = "o modified ",
+ [UWB_RSV_STATE_O_ESTABLISHED] = "o established ",
+ [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ",
+ [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding",
+ [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining",
+ [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ",
+ [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ",
+ [UWB_RSV_STATE_T_CONFLICT] = "t conflict ",
+ [UWB_RSV_STATE_T_PENDING] = "t pending ",
+ [UWB_RSV_STATE_T_DENIED] = "t denied ",
+ [UWB_RSV_STATE_T_RESIZED] = "t resized ",
+ [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
+ [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
+ [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend",
+ [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ",
};
static const char *rsv_types[] = {
@@ -41,6 +52,31 @@ static const char *rsv_types[] = {
[UWB_DRP_TYPE_PCA] = "pca",
};
+bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
+{
+ static const bool has_two_drp_ies[] = {
+ [UWB_RSV_STATE_O_INITIATED] = false,
+ [UWB_RSV_STATE_O_PENDING] = false,
+ [UWB_RSV_STATE_O_MODIFIED] = false,
+ [UWB_RSV_STATE_O_ESTABLISHED] = false,
+ [UWB_RSV_STATE_O_TO_BE_MOVED] = false,
+ [UWB_RSV_STATE_O_MOVE_COMBINING] = false,
+ [UWB_RSV_STATE_O_MOVE_REDUCING] = false,
+ [UWB_RSV_STATE_O_MOVE_EXPANDING] = true,
+ [UWB_RSV_STATE_T_ACCEPTED] = false,
+ [UWB_RSV_STATE_T_CONFLICT] = false,
+ [UWB_RSV_STATE_T_PENDING] = false,
+ [UWB_RSV_STATE_T_DENIED] = false,
+ [UWB_RSV_STATE_T_RESIZED] = false,
+ [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
+ [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
+ [UWB_RSV_STATE_T_EXPANDING_PENDING] = true,
+ [UWB_RSV_STATE_T_EXPANDING_DENIED] = true,
+ };
+
+ return has_two_drp_ies[rsv->state];
+}
+
/**
* uwb_rsv_state_str - return a string for a reservation state
* @state: the reservation state.
@@ -65,7 +101,7 @@ const char *uwb_rsv_type_str(enum uwb_drp_type type)
}
EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
-static void uwb_rsv_dump(struct uwb_rsv *rsv)
+void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
{
struct device *dev = &rsv->rc->uwb_dev.dev;
struct uwb_dev_addr devaddr;
@@ -88,12 +124,12 @@ static void uwb_rsv_release(struct kref *kref)
kfree(rsv);
}
-static void uwb_rsv_get(struct uwb_rsv *rsv)
+void uwb_rsv_get(struct uwb_rsv *rsv)
{
kref_get(&rsv->kref);
}
-static void uwb_rsv_put(struct uwb_rsv *rsv)
+void uwb_rsv_put(struct uwb_rsv *rsv)
{
kref_put(&rsv->kref, uwb_rsv_release);
}
@@ -108,6 +144,7 @@ static void uwb_rsv_put(struct uwb_rsv *rsv)
static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
{
struct uwb_rc *rc = rsv->rc;
+ struct device *dev = &rc->uwb_dev.dev;
unsigned long *streams_bm;
int stream;
@@ -129,12 +166,15 @@ static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
rsv->stream = stream;
set_bit(stream, streams_bm);
+ dev_dbg(dev, "get stream %d\n", rsv->stream);
+
return 0;
}
static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
{
struct uwb_rc *rc = rsv->rc;
+ struct device *dev = &rc->uwb_dev.dev;
unsigned long *streams_bm;
switch (rsv->target.type) {
@@ -149,86 +189,52 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
}
clear_bit(rsv->stream, streams_bm);
+
+ dev_dbg(dev, "put stream %d\n", rsv->stream);
}
-/*
- * Generate a MAS allocation with a single row component.
- */
-static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
- int first_mas, int mas_per_zone,
- int zs, int ze)
+void uwb_rsv_backoff_win_timer(unsigned long arg)
{
- struct uwb_mas_bm col;
- int z;
+ struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+ struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
+ struct device *dev = &rc->uwb_dev.dev;
- bitmap_zero(mas->bm, UWB_NUM_MAS);
- bitmap_zero(col.bm, UWB_NUM_MAS);
- bitmap_fill(col.bm, mas_per_zone);
- bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS);
-
- for (z = zs; z <= ze; z++) {
- bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS);
- bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+ bow->can_reserve_extra_mases = true;
+ if (bow->total_expired <= 4) {
+ bow->total_expired++;
+ } else {
+ /* after 4 backoff windows have expired we can exit from
+ * the backoff procedure */
+ bow->total_expired = 0;
+ bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
}
+ dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n);
+
+ /* try to relocate all the "to be moved" relocations */
+ uwb_rsv_handle_drp_avail_change(rc);
}
-/*
- * Allocate some MAS for this reservation based on current local
- * availability, the reservation parameters (max_mas, min_mas,
- * sparsity), and the WiMedia rules for MAS allocations.
- *
- * Returns -EBUSY is insufficient free MAS are available.
- *
- * FIXME: to simplify this, only safe reservations with a single row
- * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
- * problems with the MAS reserved for the BP).
- *
- * [ECMA-368] section B.2.
- */
-static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
+void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
- static const int safe_mas_in_row[UWB_NUM_ZONES] = {
- 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
- };
- int n, r;
- struct uwb_mas_bm mas;
- bool found = false;
+ struct uwb_drp_backoff_win *bow = &rc->bow;
+ struct device *dev = &rc->uwb_dev.dev;
+ unsigned timeout_us;
- /*
- * Search all valid safe allocations until either: too few MAS
- * are available; or the smallest allocation with sufficient
- * MAS is found.
- *
- * The top of the zones are preferred, so space for larger
- * allocations is available in the bottom of the zone (e.g., a
- * 15 MAS allocation should start in row 14 leaving space for
- * a 120 MAS allocation at row 0).
- */
- for (n = safe_mas_in_row[0]; n >= 1; n--) {
- int num_mas;
+ dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);
- num_mas = n * (UWB_NUM_ZONES - 1);
- if (num_mas < rsv->min_mas)
- break;
- if (found && num_mas < rsv->max_mas)
- break;
+ bow->can_reserve_extra_mases = false;
- for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) {
- if (safe_mas_in_row[r] < n)
- continue;
- uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
- if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
- found = true;
- break;
- }
- }
- }
+ if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
+ return;
- if (!found)
- return -EBUSY;
+ bow->window <<= 1;
+ bow->n = random32() & (bow->window - 1);
+ dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n);
- bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
- return 0;
+ /* reset the timer associated variables */
+ timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
+ bow->total_expired = 0;
+ mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
@@ -241,13 +247,16 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
* received.
*/
if (rsv->is_multicast) {
- if (rsv->state == UWB_RSV_STATE_O_INITIATED)
+ if (rsv->state == UWB_RSV_STATE_O_INITIATED
+ || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
+ || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
+ || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
sframes = 1;
if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
sframes = 0;
+
}
- rsv->expired = false;
if (sframes > 0) {
/*
* Add an additional 2 superframes to account for the
@@ -269,7 +278,7 @@ static void uwb_rsv_state_update(struct uwb_rsv *rsv,
rsv->state = new_state;
rsv->ie_valid = false;
- uwb_rsv_dump(rsv);
+ uwb_rsv_dump("SU", rsv);
uwb_rsv_stroke_timer(rsv);
uwb_rsv_sched_update(rsv->rc);
@@ -283,10 +292,17 @@ static void uwb_rsv_callback(struct uwb_rsv *rsv)
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
+ struct uwb_rsv_move *mv = &rsv->mv;
+
if (rsv->state == new_state) {
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
+ case UWB_RSV_STATE_O_MOVE_EXPANDING:
+ case UWB_RSV_STATE_O_MOVE_COMBINING:
+ case UWB_RSV_STATE_O_MOVE_REDUCING:
case UWB_RSV_STATE_T_ACCEPTED:
+ case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
+ case UWB_RSV_STATE_T_RESIZED:
case UWB_RSV_STATE_NONE:
uwb_rsv_stroke_timer(rsv);
break;
@@ -298,11 +314,10 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
return;
}
+ uwb_rsv_dump("SC", rsv);
+
switch (new_state) {
case UWB_RSV_STATE_NONE:
- uwb_drp_avail_release(rsv->rc, &rsv->mas);
- if (uwb_rsv_is_owner(rsv))
- uwb_rsv_put_stream(rsv);
uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
uwb_rsv_callback(rsv);
break;
@@ -312,12 +327,45 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
case UWB_RSV_STATE_O_PENDING:
uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
break;
+ case UWB_RSV_STATE_O_MODIFIED:
+ /* the companion bitmap contains the MAS to drop */
+ bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
+ break;
case UWB_RSV_STATE_O_ESTABLISHED:
+ if (rsv->state == UWB_RSV_STATE_O_MODIFIED
+ || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
+ uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
+ rsv->needs_release_companion_mas = false;
+ }
uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
uwb_rsv_callback(rsv);
break;
+ case UWB_RSV_STATE_O_MOVE_EXPANDING:
+ rsv->needs_release_companion_mas = true;
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
+ break;
+ case UWB_RSV_STATE_O_MOVE_COMBINING:
+ rsv->needs_release_companion_mas = false;
+ uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
+ bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
+ rsv->mas.safe += mv->companion_mas.safe;
+ rsv->mas.unsafe += mv->companion_mas.unsafe;
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
+ break;
+ case UWB_RSV_STATE_O_MOVE_REDUCING:
+ bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
+ rsv->needs_release_companion_mas = true;
+ rsv->mas.safe = mv->final_mas.safe;
+ rsv->mas.unsafe = mv->final_mas.unsafe;
+ bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
+ bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
+ break;
case UWB_RSV_STATE_T_ACCEPTED:
+ case UWB_RSV_STATE_T_RESIZED:
+ rsv->needs_release_companion_mas = false;
uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
uwb_rsv_callback(rsv);
@@ -325,12 +373,82 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
case UWB_RSV_STATE_T_DENIED:
uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
break;
+ case UWB_RSV_STATE_T_CONFLICT:
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
+ break;
+ case UWB_RSV_STATE_T_PENDING:
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
+ break;
+ case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
+ rsv->needs_release_companion_mas = true;
+ uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
+ break;
default:
dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
uwb_rsv_state_str(new_state), new_state);
}
}
+static void uwb_rsv_handle_timeout_work(struct work_struct *work)
+{
+ struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
+ handle_timeout_work);
+ struct uwb_rc *rc = rsv->rc;
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ uwb_rsv_dump("TO", rsv);
+
+ switch (rsv->state) {
+ case UWB_RSV_STATE_O_INITIATED:
+ if (rsv->is_multicast) {
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+ goto unlock;
+ }
+ break;
+ case UWB_RSV_STATE_O_MOVE_EXPANDING:
+ if (rsv->is_multicast) {
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
+ goto unlock;
+ }
+ break;
+ case UWB_RSV_STATE_O_MOVE_COMBINING:
+ if (rsv->is_multicast) {
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
+ goto unlock;
+ }
+ break;
+ case UWB_RSV_STATE_O_MOVE_REDUCING:
+ if (rsv->is_multicast) {
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+ goto unlock;
+ }
+ break;
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ if (rsv->is_multicast)
+ goto unlock;
+ break;
+ case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
+ /*
+ * The time out could be for the main or for the
+ * companion DRP; assume it's for the companion and
+ * drop that first. A further time out is required to
+ * drop the main.
+ */
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
+ uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
+ goto unlock;
+ default:
+ break;
+ }
+
+ uwb_rsv_remove(rsv);
+
+unlock:
+ mutex_unlock(&rc->rsvs_mutex);
+}
+
static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
struct uwb_rsv *rsv;
@@ -347,6 +465,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
rsv->timer.data = (unsigned long)rsv;
rsv->rc = rc;
+ INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
return rsv;
}
@@ -381,8 +500,18 @@ EXPORT_SYMBOL_GPL(uwb_rsv_create);
void uwb_rsv_remove(struct uwb_rsv *rsv)
{
+ uwb_rsv_dump("RM", rsv);
+
if (rsv->state != UWB_RSV_STATE_NONE)
uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+
+ if (rsv->needs_release_companion_mas)
+ uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
+ uwb_drp_avail_release(rsv->rc, &rsv->mas);
+
+ if (uwb_rsv_is_owner(rsv))
+ uwb_rsv_put_stream(rsv);
+
del_timer_sync(&rsv->timer);
uwb_dev_put(rsv->owner);
if (rsv->target.type == UWB_RSV_TARGET_DEV)
@@ -409,7 +538,7 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
* @rsv: the reservation
*
* The PAL should fill in @rsv's owner, target, type, max_mas,
- * min_mas, sparsity and is_multicast fields. If the target is a
+ * min_mas, max_interval and is_multicast fields. If the target is a
* uwb_dev it must be referenced.
*
* The reservation's callback will be called when the reservation is
@@ -418,16 +547,27 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
struct uwb_rc *rc = rsv->rc;
+ struct uwb_mas_bm available;
int ret;
mutex_lock(&rc->rsvs_mutex);
-
ret = uwb_rsv_get_stream(rsv);
if (ret)
goto out;
- ret = uwb_rsv_alloc_mas(rsv);
- if (ret) {
+ rsv->tiebreaker = random32() & 1;
+ /* get available mas bitmap */
+ uwb_drp_available(rc, &available);
+
+ ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
+ if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
+ ret = -EBUSY;
+ uwb_rsv_put_stream(rsv);
+ goto out;
+ }
+
+ ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
+ if (ret != 0) {
uwb_rsv_put_stream(rsv);
goto out;
}
@@ -448,16 +588,71 @@ EXPORT_SYMBOL_GPL(uwb_rsv_establish);
* @rsv: the reservation to modify
* @max_mas: new maximum MAS to reserve
* @min_mas: new minimum MAS to reserve
- * @sparsity: new sparsity to use
+ * @max_interval: new max_interval to use
*
* FIXME: implement this once there are PALs that use it.
*/
-int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity)
+int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
{
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);
+/*
+ * move an already established reservation (rc->rsvs_mutex must be
+ * taken when this function is called)
+ */
+int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
+{
+ struct uwb_rc *rc = rsv->rc;
+ struct uwb_drp_backoff_win *bow = &rc->bow;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rsv_move *mv;
+ int ret = 0;
+
+ if (bow->can_reserve_extra_mases == false)
+ return -EBUSY;
+
+ mv = &rsv->mv;
+
+ if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {
+
+ if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
+ /* We want to move the reservation */
+ bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
+ uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
+ }
+ } else {
+ dev_dbg(dev, "new allocation not found\n");
+ }
+
+ return ret;
+}
+
+/* Try to move every reservation in state O_ESTABLISHED, giving the MAS
+ * allocator an availability that is the real availability plus the MAS
+ * already allocated to the reservation. */
+void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
+{
+ struct uwb_drp_backoff_win *bow = &rc->bow;
+ struct uwb_rsv *rsv;
+ struct uwb_mas_bm mas;
+
+ if (bow->can_reserve_extra_mases == false)
+ return;
+
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
+ rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
+ uwb_drp_available(rc, &mas);
+ bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
+ uwb_rsv_try_move(rsv, &mas);
+ }
+ }
+
+}
+
/**
* uwb_rsv_terminate - terminate an established reservation
* @rsv: the reservation to terminate
@@ -546,6 +741,7 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
uwb_dev_get(rsv->owner);
rsv->target.type = UWB_RSV_TARGET_DEV;
rsv->target.dev = &rc->uwb_dev;
+ uwb_dev_get(&rc->uwb_dev);
rsv->type = uwb_ie_drp_type(drp_ie);
rsv->stream = uwb_ie_drp_stream_index(drp_ie);
uwb_drp_ie_to_bm(&rsv->mas, drp_ie);
@@ -567,12 +763,34 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
list_add_tail(&rsv->rc_node, &rc->reservations);
state = rsv->state;
rsv->state = UWB_RSV_STATE_NONE;
- uwb_rsv_set_state(rsv, state);
+
+ /* FIXME: do something sensible here */
+ if (state == UWB_RSV_STATE_T_ACCEPTED
+ && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
+ /* FIXME: do something sensible here */
+ } else {
+ uwb_rsv_set_state(rsv, state);
+ }
return rsv;
}
/**
+ * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation
+ * @rsv: the reservation.
+ * @mas: returns the available MAS.
+ *
+ * The usable MAS of a reservation may be less than the negotiated MAS
+ * if alien BPs are present.
+ */
+void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
+{
+ bitmap_zero(mas->bm, UWB_NUM_MAS);
+ bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
+
+/**
* uwb_rsv_find - find a reservation for a received DRP IE.
* @rc: the radio controller
* @src: source of the DRP IE
@@ -611,8 +829,6 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc)
bool ie_updated = false;
list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
- if (rsv->expired)
- uwb_drp_handle_timeout(rsv);
if (!rsv->ie_valid) {
uwb_drp_ie_update(rsv);
ie_updated = true;
@@ -622,9 +838,47 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc)
return ie_updated;
}
+void uwb_rsv_queue_update(struct uwb_rc *rc)
+{
+ unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;
+
+ queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us));
+}
+
+/**
+ * uwb_rsv_sched_update - schedule an update of the DRP IEs
+ * @rc: the radio controller.
+ *
+ * To improve performance and ensure correctness with [ECMA-368] the
+ * number of SET-DRP-IE commands that are sent is limited.
+ *
+ * DRP IE updates come from two sources: DRP events from the hardware
+ * which all occur at the beginning of the superframe ('synchronous'
+ * events) and reservation establishment/termination requests from
+ * PALs or timers ('asynchronous' events).
+ *
+ * A delayed work ensures that all the synchronous events result in
+ * one SET-DRP-IE command.
+ *
+ * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
+ * flags) will prevent an asynchronous event from starting a SET-DRP-IE
+ * command if one is currently awaiting a response.
+ *
+ * FIXME: this does leave a window where an asynchronous event can delay
+ * the SET-DRP-IE for a synchronous event by one superframe.
+ */
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
- queue_work(rc->rsv_workq, &rc->rsv_update_work);
+ spin_lock(&rc->rsvs_lock);
+ if (!delayed_work_pending(&rc->rsv_update_work)) {
+ if (rc->set_drp_ie_pending > 0) {
+ rc->set_drp_ie_pending++;
+ goto unlock;
+ }
+ uwb_rsv_queue_update(rc);
+ }
+unlock:
+ spin_unlock(&rc->rsvs_lock);
}
/*
@@ -633,7 +887,8 @@ void uwb_rsv_sched_update(struct uwb_rc *rc)
*/
static void uwb_rsv_update_work(struct work_struct *work)
{
- struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work);
+ struct uwb_rc *rc = container_of(work, struct uwb_rc,
+ rsv_update_work.work);
bool ie_updated;
mutex_lock(&rc->rsvs_mutex);
@@ -645,18 +900,34 @@ static void uwb_rsv_update_work(struct work_struct *work)
ie_updated = true;
}
- if (ie_updated)
+ if (ie_updated && (rc->set_drp_ie_pending == 0))
uwb_rc_send_all_drp_ie(rc);
mutex_unlock(&rc->rsvs_mutex);
}
+static void uwb_rsv_alien_bp_work(struct work_struct *work)
+{
+ struct uwb_rc *rc = container_of(work, struct uwb_rc,
+ rsv_alien_bp_work.work);
+ struct uwb_rsv *rsv;
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
+ rsv->callback(rsv);
+ }
+ }
+
+ mutex_unlock(&rc->rsvs_mutex);
+}
+
static void uwb_rsv_timer(unsigned long arg)
{
struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
- rsv->expired = true;
- uwb_rsv_sched_update(rsv->rc);
+ queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
/**
@@ -673,16 +944,27 @@ void uwb_rsv_remove_all(struct uwb_rc *rc)
list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
uwb_rsv_remove(rsv);
}
+ /* Cancel any postponed update. */
+ rc->set_drp_ie_pending = 0;
mutex_unlock(&rc->rsvs_mutex);
- cancel_work_sync(&rc->rsv_update_work);
+ cancel_delayed_work_sync(&rc->rsv_update_work);
}
void uwb_rsv_init(struct uwb_rc *rc)
{
INIT_LIST_HEAD(&rc->reservations);
+ INIT_LIST_HEAD(&rc->cnflt_alien_list);
mutex_init(&rc->rsvs_mutex);
- INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
+ spin_lock_init(&rc->rsvs_lock);
+ INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
+ INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
+ rc->bow.can_reserve_extra_mases = true;
+ rc->bow.total_expired = 0;
+ rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
+ init_timer(&rc->bow.timer);
+ rc->bow.timer.function = uwb_rsv_backoff_win_timer;
+ rc->bow.timer.data = (unsigned long)&rc->bow;
bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}