author    Nicolas Schodet  2012-03-16 13:42:38 +0100
committer Nicolas Schodet  2012-03-22 11:12:45 +0100
commit    bf2612398e770561aedd8b54c7f0a115ab8efcbf (patch)
tree      f2fda4560fd2d390af010fd2a3e95f91d38060df
parent    f5ad8b9437321134b47f1f059f8bbaacaa1c94cd (diff)
cesar/hal/arch: use arch_atomic_add_lock for reference counting, closes #2927
-rw-r--r--  cesar/hal/arch/arch.h                         37
-rw-r--r--  cesar/hal/arch/inc/generic.h                   6
-rw-r--r--  cesar/hal/arch/inc/sparc.h                    19
-rw-r--r--  cesar/hal/arch/test/atomic/src/test_atomic.c  95
-rw-r--r--  cesar/lib/src/blk.c                            6
-rw-r--r--  cesar/lib/src/slab.c                           4
-rw-r--r--  cesar/lib/src/trace.c                          4
-rw-r--r--  cesar/mac/pbproc/src/mfs.c                     7
8 files changed, 168 insertions(+), 10 deletions(-)
diff --git a/cesar/hal/arch/arch.h b/cesar/hal/arch/arch.h
index a1ad0d96fa..12c6f89acd 100644
--- a/cesar/hal/arch/arch.h
+++ b/cesar/hal/arch/arch.h
@@ -144,10 +144,47 @@ arch_update_cache (u32 *addr, uint n);
* \param p pointer to value to change
* \param d difference to add
* \return the new value
+ *
+ * This function is not truly atomic because the value may take surprising
+ * intermediate values. Here are the guarantees:
+ *
+ * - Once every arch_atomic_add call has returned, the value is correct:
+ *   the new value (after the last one returns) is the sum of the old
+ *   value (before the first one was entered) and all deltas.
+ * - Therefore, if A can be interrupted by B, but B cannot be interrupted
+ *   by A:
+ *   - values read by A are always correct,
+ *   - values read by B are undefined (the sum of the old value and some
+ *     combination of the deltas applied since the first arch_atomic_add).
+ *
+ * To summarize: using the value in an interrupting context may be
+ * dangerous. Here, an interrupting context is anything like an ISR, a
+ * DSR, or a higher-priority task.
*/
extern inline int
arch_atomic_add (volatile int *p, int d);
+/**
+ * Atomically add an integer value, with respect to interrupts, but never
+ * return zero while the value is still evolving.
+ * \param p pointer to value to change, limited to 24 bits
+ * \param d difference to add
+ * \return the new value, or a non-zero value if still evolving
+ *
+ * See arch_atomic_add. This function is a variant which adds a guarantee
+ * to the "undefined" case:
+ *
+ * - if A can be interrupted by B, but B cannot be interrupted by A, the
+ *   returned value is either:
+ *   - the correct value if A is not inside arch_atomic_add_lock,
+ *   - a non-zero value if A is inside arch_atomic_add_lock.
+ *
+ * This is fine for a reference counter, for which only the zero value
+ * triggers an action.
+ */
+extern inline int
+arch_atomic_add_lock (volatile int *p, int d);
+
#if defined (ECOS) && ECOS
# include "hal/arch/inc/ecos.h"
#else
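As a usage sketch of the new primitive, mirroring the blk.c and slab.c call
sites further down: only the transition to zero triggers the destructor, and
a transient non-zero value seen from an interrupting context is harmless.
The obj_t and obj_free names below are illustrative, not part of this patch.

    /* Minimal reference-counting sketch, assuming a counter limited to
     * 24 bits as required by arch_atomic_add_lock. */
    struct obj_t
    {
        volatile int refcnt;
    };

    static void
    obj_addref (struct obj_t *obj)
    {
        arch_atomic_add_lock (&obj->refcnt, 1);
    }

    static void
    obj_release (struct obj_t *obj)
    {
        /* Only zero triggers an action; while another context is inside
         * arch_atomic_add_lock, the returned value is guaranteed
         * non-zero, so the object cannot be freed twice. */
        if (arch_atomic_add_lock (&obj->refcnt, -1) == 0)
            obj_free (obj);
    }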
diff --git a/cesar/hal/arch/inc/generic.h b/cesar/hal/arch/inc/generic.h
index bd823105bb..9825e17111 100644
--- a/cesar/hal/arch/inc/generic.h
+++ b/cesar/hal/arch/inc/generic.h
@@ -46,6 +46,12 @@ arch_atomic_add (volatile int *p, int d)
return v;
}
+extern inline int
+arch_atomic_add_lock (volatile int *p, int d)
+{
+ return arch_atomic_add (p, d);
+}
+
extern inline void
arch_write_buffer_flush (void)
{
diff --git a/cesar/hal/arch/inc/sparc.h b/cesar/hal/arch/inc/sparc.h
index beafd42c3b..379c318623 100644
--- a/cesar/hal/arch/inc/sparc.h
+++ b/cesar/hal/arch/inc/sparc.h
@@ -159,4 +159,23 @@ arch_atomic_add (volatile int *p, int d)
return n;
}
+extern inline int
+arch_atomic_add_lock (volatile int *p, int d)
+{
+ int o, n, t, l;
+ __asm__ (" ldstub %[p], %[l]" "\n\t"
+ " ld %[p], %[o]" "\n\t"
+ "1: add %[o], %[d], %[n]" "\n\t"
+ " mov %[n], %[t]" "\n\t"
+ " swap %[p], %[t]" "\n\t"
+ " subcc %[t], %[o], %[d]" "\n\t"
+ " bne,a 1b" "\n\t"
+ " mov %[n], %[o]" "\n\t"
+ " stub %[l], %[p]" "\n\t"
+ : [p] "=m" (*p), [d] "=r" (d), [o] "=&r" (o), [n] "=&r" (n), [t] "=&r" (t), [l] "=&r" (l)
+ : "1" (d)
+ );
+ return *p;
+}
+
#endif /* hal_arch_inc_sparc_h */
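For readers not fluent in SPARC assembly, the sequence above can be read as
the following C model (a sketch only: LDSTUB, SWAP and STUB stand for single
atomic SPARC instructions, everything between them may be interrupted).
ldstub fetches the most-significant byte of the word and stores 0xff into
it, which is why the value is limited to 24 bits and reads as non-zero while
an operation is in flight.

    /* C model of the asm above; illustrative only, the real
     * implementation is the inline assembly. */
    static int
    atomic_add_lock_model (volatile int *p, int d)
    {
        int o, n, t;
        uint l;
        LDSTUB (p, l);      /* l = top byte of *p, top byte set to 0xff:
                             * *p is now non-zero for any observer */
        o = *p;
        for (;;)
        {
            n = o + d;      /* compute the candidate new value */
            t = n;
            SWAP (p, t);    /* atomically exchange *p and t */
            d = t - o;      /* non-zero if *p changed under our feet */
            if (d == 0)
                break;
            o = n;          /* redo, adding the difference we erased */
        }
        STUB (p, l);        /* restore the top byte, releasing the lock */
        return *p;
    }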
diff --git a/cesar/hal/arch/test/atomic/src/test_atomic.c b/cesar/hal/arch/test/atomic/src/test_atomic.c
index 0707ff06f3..cbf51252e3 100644
--- a/cesar/hal/arch/test/atomic/src/test_atomic.c
+++ b/cesar/hal/arch/test/atomic/src/test_atomic.c
@@ -50,6 +50,7 @@ struct atomic_algo_coroutine_t
int state;
int *p, d;
int o, n, t;
+ uint l;
};
typedef struct atomic_algo_coroutine_t atomic_algo_coroutine_t;
@@ -96,12 +97,56 @@ bne:
return ctx->n;
}
+#define LDSTUB(m, r) do { \
+ r = (uint) m >> 24; \
+ m |= 0xff << 24; \
+} while (0)
+
+#define STUB(m, r) do { \
+ m = (m & 0xffffff) | (r << 24); \
+} while (0)
+
+static int
+atomic_algo_lock_coroutine (atomic_algo_coroutine_t *ctx, int *p_, int d_)
+{
+ /* The assembly algorithm: */
+ COROUTINE_BEGIN (ctx);
+ ctx->p = p_; ctx->d = d_;
+#define v (*ctx->p)
+#define d (ctx->d)
+#define o (ctx->o)
+#define n (ctx->n)
+#define t (ctx->t)
+#define l (ctx->l)
+ LDSTUB (v, l); COROUTINE_RETURN (ctx, 0);
+ o = v; COROUTINE_RETURN (ctx, 0);
+bne:
+ n = o + d; COROUTINE_RETURN (ctx, 0);
+ t = n; COROUTINE_RETURN (ctx, 0);
+ XCH (t, v); COROUTINE_RETURN (ctx, 0);
+ d = t - o; COROUTINE_RETURN (ctx, 0);
+ if (d != 0)
+ { COROUTINE_RETURN (ctx, 0);
+ o = n; COROUTINE_RETURN (ctx, 0);
+ goto bne;
+ }
+ STUB (v, l); COROUTINE_RETURN (ctx, 0);
+#undef l
+#undef t
+#undef n
+#undef o
+#undef d
+#undef v
+ COROUTINE_END (ctx);
+ return *ctx->p;
+}
+
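A worked example of how the macros above model the lock byte (SPARC is
big-endian, so the lock lives in the most-significant byte; values are
illustrative): this is also why the test values below are masked well under
24 bits.

    /* before:        v = 0x00000000, an idle counter of zero
     * LDSTUB (v, l): l = 0x00, v = 0xff000000 -- v reads as non-zero
     *                while the operation is in flight
     * ...adds proceed on the low 24 bits via XCH...
     * STUB (v, l):   v = (v & 0xffffff) | (l << 24) -- lock released,
     *                the true value is visible again */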
void
atomic_algo_test_case (test_t t)
{
int v, vr, d1, d2, r1, r2;
uint i, j, wait;
- bool done1, done2, it;
+ bool done1, done2, it, done1_first;
lib_rnd_t rnd[1];
test_case_begin (t, "algo");
lib_rnd_init (rnd, 1234);
@@ -143,6 +188,43 @@ atomic_algo_test_case (test_t t)
test_fail_unless (v == vr);
}
} test_end;
+ test_begin (t, "add lock")
+ {
+ for (i = 0; i < NB_ITER; i++)
+ {
+ atomic_algo_coroutine_t cr1, cr2;
+ COROUTINE_INIT (&cr1);
+ COROUTINE_INIT (&cr2);
+ v = vr = LIMIT (lib_rnd32 (rnd)) & 0xffff;
+ d1 = (LIMIT (lib_rnd32 (rnd)) & 0xffff) + 1;
+ d2 = (LIMIT (lib_rnd32 (rnd)) & 0xffff) + 1;
+ wait = lib_rnd_uniform (rnd, 16);
+ it = lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.8));
+ vr += d1 + d2;
+ done1 = done2 = false;
+ do
+ {
+ /* No intermixing. */
+ if (!done1 && (wait || done2))
+ {
+ r1 = atomic_algo_lock_coroutine (&cr1, &v, d1);
+ done1 = COROUTINE_FINISH (&cr1);
+ done1_first = done1 && !done2;
+ }
+ if (wait)
+ wait--;
+ else if (!done2)
+ {
+ r2 = atomic_algo_lock_coroutine (&cr2, &v, d2);
+ done2 = COROUTINE_FINISH (&cr2);
+ }
+ if (it && !done1)
+ atomic_algo_may_change (rnd, &v, &vr);
+ } while (wait || !done1 || !done2);
+ test_fail_unless (done1_first ? r2 == vr : r1 == vr);
+ test_fail_unless (v == vr);
+ }
+ } test_end;
}
void
@@ -167,6 +249,17 @@ atomic_impl_test_case (test_t t)
test_fail_unless (v == v2);
}
} test_end;
+ test_begin (t, "add_lock")
+ {
+ for (i = 0; i < NB_ITER; i++)
+ {
+ v = v2 = LIMIT (lib_rnd32 (rnd)) & 0xfff;
+ d = (LIMIT (lib_rnd32 (rnd)) & 0xfff) + 1;
+ v2 += d;
+ arch_atomic_add_lock (&v, d);
+ test_fail_unless (v == v2);
+ }
+ } test_end;
}
void
diff --git a/cesar/lib/src/blk.c b/cesar/lib/src/blk.c
index da596756de..b378a1fe03 100644
--- a/cesar/lib/src/blk.c
+++ b/cesar/lib/src/blk.c
@@ -281,7 +281,7 @@ blk_addref_desc_ (blk_t *blk __FL)
dbg_blame_ptr (blk);
dbg_blame (DATA_TO_DESC (blk->data) == blk);
/* Update reference counter. */
- arch_atomic_add (REFCNT (blk), 1);
+ arch_atomic_add_lock (REFCNT (blk), 1);
/* Book keeping. */
restrack_update (NULL, blk, _fl_, 1);
}
@@ -319,7 +319,7 @@ blk_release_desc_ (blk_t *blk __FL)
/* Book keeping. */
restrack_update (NULL, blk, _fl_, -1);
/* Update reference counter. */
- if (arch_atomic_add (REFCNT (blk), -1) == 0)
+ if (arch_atomic_add_lock (REFCNT (blk), -1) == 0)
{
blk_free_desc_ (blk __fl);
}
@@ -391,7 +391,7 @@ blk_release_ (void *data __FL)
dbg_blame_ptr (obj);
dbg_blame (obj->magic == BLK_OBJ_MAGIC);
restrack_update (NULL, obj, _fl_, -1);
- if (arch_atomic_add (REFCNT (obj), -1) == 0)
+ if (arch_atomic_add_lock (REFCNT (obj), -1) == 0)
{
if (obj->destructor)
obj->destructor (data);
diff --git a/cesar/lib/src/slab.c b/cesar/lib/src/slab.c
index 017a03dddb..058a07671a 100644
--- a/cesar/lib/src/slab.c
+++ b/cesar/lib/src/slab.c
@@ -222,7 +222,7 @@ slab_addref_ (void *object __FL)
{
dbg_blame_ptr (object);
/* Update reference counter. */
- arch_atomic_add (REFCNT (object), 1);
+ arch_atomic_add_lock (REFCNT (object), 1);
/* Book keeping. */
restrack_update (NULL, object, _fl_, 1);
}
@@ -235,7 +235,7 @@ slab_release_ (void *object __FL)
/* Book keeping. */
restrack_update (NULL, object, _fl_, -1);
/* Update reference counter. */
- if (arch_atomic_add (REFCNT (object), -1) == 0)
+ if (arch_atomic_add_lock (REFCNT (object), -1) == 0)
{
/* Lock. */
arch_dsr_lock ();
diff --git a/cesar/lib/src/trace.c b/cesar/lib/src/trace.c
index db9e65b2c8..819f8c90c7 100644
--- a/cesar/lib/src/trace.c
+++ b/cesar/lib/src/trace.c
@@ -118,7 +118,9 @@ trace_drop_chunks (uint n)
hdrop = bigest_buf->head;
tdrop = hdrop;
n -= i_can_drop;
- arch_atomic_add ((int *) &bigest_buf->chunks_nb, -i_can_drop);
+ uint flags = arch_isr_lock ();
+ bigest_buf->chunks_nb -= i_can_drop;
+ arch_isr_unlock (flags);
while (--i_can_drop)
tdrop = tdrop->next;
bigest_buf->head = tdrop->next;
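The chunk counter here is not a reference counter (zero is not the trigger
value, and readers need the true count), so the non-zero-while-locked
guarantee does not help it; the patch instead protects the update with a
short interrupt-masked section, the same pattern applied to mfs.c below.
The general shape, with illustrative names:

    /* Interrupt-masked read-modify-write, as used in trace.c above and
     * mfs.c below; 'counter' and 'n' are illustrative. */
    uint flags = arch_isr_lock ();  /* mask interrupts, save prior state */
    counter -= n;                   /* plain update, now safe from ISRs */
    arch_isr_unlock (flags);        /* restore the saved interrupt state */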
diff --git a/cesar/mac/pbproc/src/mfs.c b/cesar/mac/pbproc/src/mfs.c
index 7815315eab..2304b4b62c 100644
--- a/cesar/mac/pbproc/src/mfs.c
+++ b/cesar/mac/pbproc/src/mfs.c
@@ -118,10 +118,11 @@ pbproc_mfs_provide (mfs_tx_t *mfs, uint nb)
{
dbg_claim (mfs);
dbg_assert ((int) nb <= mfs->pending_seg_nb);
- volatile mfs_tx_t *vmfs = mfs;
/* PB Processing can now use the newly inserted segments. */
- vmfs->pending_seg_nb -= nb;
- arch_atomic_add (&vmfs->seg_nb, nb);
+ uint flags = arch_isr_lock ();
+ mfs->pending_seg_nb -= nb;
+ mfs->seg_nb += nb;
+ arch_isr_unlock (flags);
}
void