summaryrefslogtreecommitdiff
path: root/cesar/mac/ca/test
diff options
context:
space:
mode:
Diffstat (limited to 'cesar/mac/ca/test')
-rw-r--r--cesar/mac/ca/test/ca/src/phy_stub.c1
-rw-r--r--cesar/mac/ca/test/ca/src/test_access.c2
-rw-r--r--cesar/mac/ca/test/ca_eoc/Config7
-rw-r--r--cesar/mac/ca/test/ca_eoc/Makefile8
-rw-r--r--cesar/mac/ca/test/ca_eoc/inc/phy_stub.h23
-rw-r--r--cesar/mac/ca/test/ca_eoc/src/test_access.c1002
-rw-r--r--cesar/mac/ca/test/ca_eoc/src/test_alloc.c444
-rw-r--r--cesar/mac/ca/test/ca_eoc/src/test_ca.c39
8 files changed, 1526 insertions, 0 deletions
diff --git a/cesar/mac/ca/test/ca/src/phy_stub.c b/cesar/mac/ca/test/ca/src/phy_stub.c
index d5626afcaa..1e86eb3e0a 100644
--- a/cesar/mac/ca/test/ca/src/phy_stub.c
+++ b/cesar/mac/ca/test/ca/src/phy_stub.c
@@ -54,6 +54,7 @@ phy_access_backoff_update (phy_t *ctx, uint cap)
void
phy_access_timer_program (phy_t *ctx, u32 date)
{
    dbg_assert (ctx);
+    dbg_assert (lesseq_mod2p32 (ctx->date, date));
}
diff --git a/cesar/mac/ca/test/ca/src/test_access.c b/cesar/mac/ca/test/ca/src/test_access.c
index 5ab9e251e9..b2d77c437a 100644
--- a/cesar/mac/ca/test/ca/src/test_access.c
+++ b/cesar/mac/ca/test/ca/src/test_access.c
@@ -307,6 +307,8 @@ access_basic_test_case (test_t t)
|| mfs->seg_nb == 0
|| fl_tck > ca->access.param.duration_tck)
{
+ if (mfs && mfs->seg_nb == 0)
+ test_fail_unless (!mfs->common.mme);
ca_access_done (ca);
ca_access_defer (ca, phy->date);
access_check_defer (t, ca);
diff --git a/cesar/mac/ca/test/ca_eoc/Config b/cesar/mac/ca/test/ca_eoc/Config
new file mode 100644
index 0000000000..6911585ee3
--- /dev/null
+++ b/cesar/mac/ca/test/ca_eoc/Config
@@ -0,0 +1,7 @@
+CONFIG_DEBUG_FATAL_CATCH = y
+CONFIG_TRACE = y
+CONFIG_RESTRACK = y
+CONFIG_MAC_COMMON_EOC_SCHED = y
+CONFIG_MAC_PBPROC_EOC_FC = y
+CONFIG_MAC_COMMON_EOC_MFS = y
+CONFIG_MAC_COMMON_EOC_TEI = y
diff --git a/cesar/mac/ca/test/ca_eoc/Makefile b/cesar/mac/ca/test/ca_eoc/Makefile
new file mode 100644
index 0000000000..e246399098
--- /dev/null
+++ b/cesar/mac/ca/test/ca_eoc/Makefile
@@ -0,0 +1,8 @@
+BASE = ../../../..
+
+HOST_PROGRAMS = test_ca
+test_ca_SOURCES = test_ca.c ../../ca/src/test_backoff.c test_alloc.c test_access.c \
+ ../../ca/src/phy_stub.c
+test_ca_MODULES = lib mac/ca mac/common
+
+include $(BASE)/common/make/top.mk
diff --git a/cesar/mac/ca/test/ca_eoc/inc/phy_stub.h b/cesar/mac/ca/test/ca_eoc/inc/phy_stub.h
new file mode 100644
index 0000000000..b1a4746d35
--- /dev/null
+++ b/cesar/mac/ca/test/ca_eoc/inc/phy_stub.h
@@ -0,0 +1,23 @@
+#ifndef inc_phy_stub_h
+#define inc_phy_stub_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file inc/phy_stub.h
+ * \brief HAL Phy stub.
+ * \ingroup test
+ */
+
+/** Stub phy structure. */
+struct phy_t
+{
+ u32 date;
+};
+/* Forward declaration in hal/phy/forward.h. */
+
+#endif /* inc_phy_stub_h */
diff --git a/cesar/mac/ca/test/ca_eoc/src/test_access.c b/cesar/mac/ca/test/ca_eoc/src/test_access.c
new file mode 100644
index 0000000000..d7c8a59db2
--- /dev/null
+++ b/cesar/mac/ca/test/ca_eoc/src/test_access.c
@@ -0,0 +1,1002 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file src/test_access.c
+ * \brief Test access code.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/alloc.h"
+#include "mac/common/timings.h"
+
+#include "hal/phy/phy.h"
+#include "inc/phy_stub.h"
+
+#include "lib/test.h"
+
+#include <string.h>
+#include <stdarg.h>
+
+#define NB_ITER 100000
+#define NB_PEER 5
+#define NB_GLID 20
+#define RANDOM_START 0
+
+#define PEER_MIN 10
+#define MAX_SYMB 3
+
+void
+access_random_schedule (lib_rnd_t *rnd, ca_schedule_t *sched, uint length_tck)
+{
+ uint i;
+ const uint nb = lib_rnd_uniform (rnd, 10/*CA_SCHEDULE_SIZE*/ - 3) + 3;
+ /* Coexistence mode and encryption. */
+ sched->coexistence_mode = MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE;
+ sched->snid = 5;
+ sched->nek_switch = 0;
+ /* Random allocations. */
+ uint zoffset_tck = 0;
+ uint avr_alloc_tck = length_tck/nb;
+ uint low_alloc_tck = avr_alloc_tck - (avr_alloc_tck>>2); /* 3/4 */
+ uint dif_alloc_tck = (avr_alloc_tck>>2); /* 1/4 */
+ sched->allocations_nb = nb;
+ for (i = 0; i < nb - 1; i++)
+ {
+ sched->allocations[i].end_offset_tck =
+ lib_rnd_uniform (rnd, dif_alloc_tck)
+ + zoffset_tck + low_alloc_tck;
+ zoffset_tck = sched->allocations[i].end_offset_tck;
+ }
+ sched->allocations[nb - 1].end_offset_tck = length_tck;
+ /* Random GLID. */
+ static const struct
+ {
+ u8 glid_min;
+ u8 glid_max;
+ uint prob;
+ } glid_prob[] = {
+ { MAC_LID_SPC_HOLE, MAC_LID_SPC_HOLE, 8 },
+ { MAC_GLID_MIN, MAC_GLID_MAX, 8 + 8 },
+ { MAC_LID_CFPI, MAC_LID_CFPI, 8 + 8 + 1 },
+ { MAC_LID_SHARED_CSMA, MAC_LID_SHARED_CSMA, 8 + 8 + 1 + 2 + 8 },
+ { MAC_LID_LOCAL_CSMA, MAC_LID_LOCAL_CSMA, 8 + 8 + 1 + 2 + 8 + 8 },
+ };
+ sched->allocations[0].glid = MAC_LID_SPC_CENTRAL;
+ for (i = 1; i < nb; i++)
+ {
+ uint r = lib_rnd_uniform (rnd, glid_prob[COUNT (glid_prob) - 1].prob);
+ uint j;
+ DICHOTOMY_SEARCH (0, COUNT (glid_prob), j, r < glid_prob[j].prob);
+ dbg_assert (j < COUNT (glid_prob));
+ sched->allocations[i].glid = glid_prob[j].glid_min
+ + lib_rnd_uniform (rnd, glid_prob[j].glid_max
+ - glid_prob[j].glid_min + 1);
+ }
+}
+
+/**
+ * Get allocation end date without AIFS.
+ * \param ca CA context
+ * \param date date in allocation
+ * \return allocation end date
+ */
+static uint
+access_allocation_end_date (ca_t *ca, uint date)
+{
+ dbg_assert (ca);
+ ca_beacon_period_t *bp = ca_alloc_find_beacon_period (ca, date);
+ dbg_assert (bp);
+ ca_schedule_t *sched = &ca->schedules[bp->schedule_index];
+ uint alloc_i = ca_alloc_find (sched, date - bp->start_date);
+ dbg_assert (alloc_i < sched->allocations_nb);
+ ca_allocation_t *alloc = &sched->allocations[alloc_i];
+ return bp->start_date + alloc->end_offset_tck;
+}
+
+/**
+ * Compute AIFS at given date.
+ * \param ca CA context
+ * \param date current date
+ * \return AIFS in tck
+ */
+static uint
+access_aifs_tck (ca_t *ca, uint date)
+{
+ dbg_assert (ca);
+ ca_beacon_period_t *bp = ca_alloc_find_beacon_period (ca, date);
+ dbg_assert (bp);
+ ca_schedule_t *sched = &ca->schedules[bp->schedule_index];
+ uint alloc_i = ca_alloc_find (sched, date - bp->start_date);
+ dbg_assert (alloc_i < sched->allocations_nb);
+ ca_allocation_t *alloc = &sched->allocations[alloc_i];
+ uint aifs_tck = MAC_LID_IS_BEACON (alloc->glid)
+ ? MAC_B2BIFS_TCK : MAC_AIFS_TCK;
+ if (alloc_i == sched->allocations_nb - 1
+ && MAC_LID_IS_BEACON (ca->schedules[bp[1].schedule_index]
+ .allocations[0].glid)
+ && MAC_B2BIFS_TCK > MAC_AIFS_TCK)
+ aifs_tck = MAC_B2BIFS_TCK;
+ return aifs_tck;
+}
+
+void
+access_check_vcs_restart (test_t t, ca_t *ca, u32 date, uint duration_tck,
+ bool eifs, const ca_alloc_param_t *ap)
+{
+ test_within (t);
+ dbg_assert (ca);
+ ca_beacon_period_t *bp = ca_alloc_find_beacon_period (ca, date);
+ dbg_assert (bp);
+ ca_schedule_t *sched = &ca->schedules[bp->schedule_index];
+ dbg_assert (sched->coexistence_mode < MAC_COEXISTENCE_NB);
+ dbg_assert (sched->snid < 16);
+ dbg_assert (sched->nek_switch < 2);
+ dbg_assert (sched->allocations_nb);
+ uint alloc_i = ca_alloc_find (sched, date - bp->start_date);
+ dbg_assert (alloc_i < sched->allocations_nb);
+ ca_allocation_t *alloc = &sched->allocations[alloc_i];
+ ca_access_param_t *a = &ca->access.param;
+ uint aifs_tck = access_aifs_tck (ca, date);
+ uint alloc_end_date = bp->start_date + alloc->end_offset_tck - aifs_tck;
+ if (ap)
+ {
+ /* Check current allocation parameters. */
+ test_fail_unless (ap->coexistence_mode == sched->coexistence_mode);
+ test_fail_unless (ap->snid == sched->snid);
+ test_fail_unless (ap->nek_switch == sched->nek_switch);
+ test_fail_unless (ap->end_date == alloc_end_date);
+ }
+ if (!ca->access.unusable)
+ {
+ /* Usable, the access should be prepared. */
+ test_fail_unless (lesseq_mod2p32 (date, a->access_date));
+ uint slot = CA_ALLOC_IS_CSMA (alloc->glid)
+ ? (eifs ? 0 : 2) + ca->backoff.bc
+ : 0;
+ bool prp = CA_ALLOC_IS_CSMA (alloc->glid) && !eifs;
+ test_fail_unless (a->access_date == date + duration_tck
+ + slot * MAC_SLOT_TCK
+ /* TODO: remove this: */
+ + (alloc->glid == MAC_LID_CFPI
+ ? MAC_US_TO_TCK (40) : 0));
+ test_fail_unless (a->cw_start_date == date + duration_tck
+ + (prp ? 2 * MAC_SLOT_TCK : 0));
+ test_fail_unless (a->aifs
+ || (a->duration_tck == alloc_end_date
+ - a->access_date));
+ test_fail_unless (a->prp == prp);
+ test_fail_unless (a->cfp == !CA_ALLOC_IS_CSMA (alloc->glid));
+ }
+ if (!a->aifs)
+ {
+ /* ACCESS scheduled. */
+ test_fail_unless (lesseq_mod2p32 (a->access_date, alloc_end_date));
+ test_fail_unless (ca->access.timer_date == a->access_date
+ - ca->anticipation_tck);
+ }
+ else
+ {
+ /* AIFS scheduled. */
+ test_fail_unless (ca->access.timer_date == alloc_end_date);
+ }
+}
+
+void
+access_check_vcs_restart_eifs (test_t t, ca_t *ca, u32 date,
+ const ca_alloc_param_t *ap)
+{
+ const ca_alloc_param_t *cap = &ca->current_allocation_param;
+ uint eifs_tck = ((cap->coexistence_mode
+ == MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE)
+ || !cap->hybrid) ? MAC_EIFS_AV_TCK : MAC_EIFS_10_TCK;
+ access_check_vcs_restart (t, ca, date, eifs_tck, true, ap);
+}
+
+void
+access_check_defer (test_t t, ca_t *ca)
+{
+ test_within (t);
+ dbg_assert (ca);
+ test_fail_unless (ca->access.param.aifs);
+ test_fail_unless (ca->access.timer_date !=
+ ca->current_allocation_param.end_date
+ - access_aifs_tck (ca, ca->access.timer_date));
+}
+
+void
+access_check_aifs (test_t t, ca_t *ca, u32 date,
+ const ca_alloc_param_t *ap)
+{
+ dbg_assert_ptr (ap);
+ access_check_vcs_restart (t, ca, access_allocation_end_date (ca, date), 0,
+ false, ap);
+}
+
+void
+access_basic_test_case (test_t t)
+{
+ uint i, j;
+ lib_rnd_t rnd[1];
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ test_case_begin (t, "basic");
+ /* Initialise. */
+ lib_rnd_init (rnd, 1234);
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+#if RANDOM_START
+ phy->date = lib_rnd32 (rnd);
+#else
+ phy->date = MAC_MS_TO_TCK (500 / 50);
+#endif
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ ca = ca_init (phy, &config, store, 0);
+ /* Characteristics for null slots are determined as-is:
+ * - for index < NB_GLID: i = index
+ * - lid = i != 0 ? i + MAC_GLID_MIN : MAC_LID_SPC_CENTRAL
+ * - cap = i % 4
+ * - tei = i % NB_PEER + PEER_MIN
+ * - bcast = false
+ * - cfp = true
+ * - for index >= NB_GLID: i = index - NB_GLID
+ * - bcast = i >= MAC_PLID_NB * NB_PEER
+ * - cap = bcast ? 0 : i % MAC_PLID_NB
+ * - lid = cap + (bcast ? MAC_PLID_MIN : MAC_LLID_MIN)
+ * - tei = bcast
+ * ? MAC_TEI_BCAST
+ * : i / MAC_PLID_NB + PEER_MIN
+ * - cfp = true
+ */
+ mfs_tx_t *mfses[NB_GLID + MAC_PLID_NB * NB_PEER + 1];
+ uint mfses_used = 0;
+ memset (mfses, 0, sizeof (mfses));
+ /* Now the big test.
+ * Create random schedules and random MFS for the STA with TEI = [2,3]. */
+ test_begin (t, "random test")
+ {
+ const int beacon_period_length_tck = MAC_MS_TO_TCK (1000 / 10);
+ ca_beacon_period_t bps[2];
+ /* Initialise first beacon period. */
+ access_random_schedule (rnd, ca_alloc_get_schedule (ca, 3),
+ beacon_period_length_tck);
+ bps[1].start_date = phy->date - beacon_period_length_tck / 2;
+ bps[1].schedule_index = 3;
+ for (i = 0; i < NB_ITER; i++)
+ {
+ /* Make new schedule. */
+ access_random_schedule (rnd, ca_alloc_get_schedule (ca, i % 4),
+ beacon_period_length_tck);
+ /* Make new beacon period. */
+ bps[0] = bps[1];
+ bps[1].start_date = bps[0].start_date + beacon_period_length_tck;
+ bps[1].schedule_index = i % 4;
+ /* Update beacon period. */
+ phy->date += CA_ACCESS_AIFS_ANTICIP_TCK;
+ ca_alloc_update_beacon_periods (ca, bps, COUNT (bps));
+ phy->date -= CA_ACCESS_AIFS_ANTICIP_TCK;
+ /* First schedule. */
+ if (i == 0)
+ {
+ const ca_alloc_param_t *ap =
+ ca_access_activate (ca, phy->date);
+ access_check_vcs_restart_eifs (t, ca, phy->date, ap);
+ }
+ while (less_mod2p32 (phy->date, bps[1].start_date))
+ {
+ if (phy->date == ca->access.timer_date
+ || lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.5)))
+ {
+ /* Next ACCESS. */
+ phy->date = ca->access.timer_date;
+ if (!ca->access.param.aifs)
+ {
+ mfs_tx_t *mfs = ca_access_get_mfs (ca);
+ int seg_sent;
+ uint fl_tck;
+ if (mfs)
+ {
+ /* Timings are completely approximated. */
+ seg_sent = mfs->seg_nb
+ ? lib_rnd_uniform (rnd, mfs->seg_nb) + 1 : 0;
+ fl_tck = MAC_PREAMBLE_TCK + MAC_FC_AV_TCK
+ + MAC_DX567_TCK * seg_sent;
+ }
+ if (!mfs
+ || mfs->seg_nb == 0
+ || fl_tck > ca->access.param.duration_tck)
+ {
+ if (mfs && mfs->seg_nb == 0
+ && ca->access.param.cfp)
+ {
+ test_fail_unless (!mfs->common.mme);
+ uint lid = mfs->common.lid;
+ test_fail_unless (lid >= MAC_LLID_MIN
+ && lid <= MAC_LLID_MAX);
+ /* Check this is the smallest LLID. */
+ while (lid-- > MAC_LLID_MIN)
+ test_fail_if (
+ mac_store_mfs_get_sta_tx_data_locked (
+ store, lid, mfs->common.tei));
+ }
+ ca_access_done (ca);
+ ca_access_defer (ca, phy->date);
+ access_check_defer (t, ca);
+ }
+ else
+ {
+ ca_access_vcs_restart_eifs (ca, phy->date);
+ access_check_vcs_restart_eifs (t, ca, phy->date,
+ NULL);
+ if (seg_sent)
+ {
+ mfs->seg_nb -= seg_sent;
+ ca_access_done (ca);
+ ca_access_vcs_restart (ca,
+ phy->date + fl_tck);
+ access_check_vcs_restart (t, ca, phy->date,
+ fl_tck, false,
+ NULL);
+ }
+ else
+ ca_access_done (ca);
+ }
+ }
+ else
+ {
+ const ca_alloc_param_t *ap = ca_access_aifs (ca);
+ access_check_aifs (t, ca, phy->date, ap);
+ }
+ }
+ else
+ {
+ /* Random event. */
+ dbg_assert (less_mod2p32 (phy->date,
+ ca->access.timer_date));
+ u32 next_date = phy->date + lib_rnd_uniform (
+ rnd, ca->access.timer_date - phy->date);
+ /* TODO: For the moment, avoid bad cases where date is in
+ * AIFS. */
+ u32 danger_date = bps[1].start_date + MAC_US_TO_TCK (100);
+ if (!less_mod2p32 (next_date, danger_date))
+ next_date = danger_date;
+ phy->date = next_date;
+ if (mfses_used == 0
+ || lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.2)))
+ {
+ j = lib_rnd_uniform (rnd, COUNT (mfses));
+ if (!mfses[j])
+ {
+ /* Create a new MFS. */
+ uint lid, cap, tei;
+ bool bcast, cfp;
+ if (j < NB_GLID)
+ {
+ lid = j != 0 ? j + MAC_GLID_MIN
+ : MAC_LID_SPC_CENTRAL;
+ cap = j % 4;
+ tei = j % NB_PEER + PEER_MIN;
+ bcast = false;
+ cfp = true;
+ }
+ else
+ {
+ uint sj = j - NB_GLID;
+ bcast = sj >= MAC_PLID_NB * NB_PEER;
+ cap = bcast ? 0: sj % MAC_PLID_NB;
+ lid = cap +
+ (bcast ? MAC_PLID_MIN : MAC_LLID_MIN);
+ tei = bcast ? MAC_TEI_BCAST
+ : sj / MAC_PLID_NB + PEER_MIN;
+ cfp = true;
+ }
+ bool added;
+ mfs_tx_t *mfs = mac_store_mfs_add_tx
+ (store, bcast, false, lid, tei, &added);
+ mfs->cfp = cfp;
+ dbg_assert (added);
+ mfs->seg_nb = lib_rnd_uniform (rnd, 100);
+ mfses[j] = mfs;
+ mfses_used++;
+ ca_mfs_add (ca, mfs);
+ }
+ else
+ {
+ /* Remove an MFS. */
+ mfs_tx_t *mfs = mfses[j];
+ ca_mfs_remove (ca, mfs);
+ mfs->seg_nb = 0;
+ mac_store_mfs_remove (store,
+ PARENT_OF (mfs_t, tx, mfs));
+ blk_release (mfs);
+ mfses[j] = NULL;
+ mfses_used--;
+ }
+ }
+ else
+ {
+ /* Modify an MFS. */
+ do {
+ j = lib_rnd_uniform (rnd, COUNT (mfses));
+ } while (!mfses[j]);
+ mfs_tx_t *mfs = mfses[j];
+ if (mfs->ca_state != CA_MFS_STATE_HELD)
+ {
+ mfs->seg_nb = lib_rnd_uniform (rnd, 100);
+ ca_mfs_update (ca, mfs);
+ }
+ }
+ }
+ }
+ }
+ ca_access_deactivate (ca);
+ } test_end;
+ /* Uninitialise. */
+ for (i = 0; i < COUNT (mfses); i++)
+ {
+ mfs_tx_t *mfs = mfses[i];
+ if (mfs)
+ {
+ ca_mfs_remove (ca, mfs);
+ mac_store_mfs_remove (store, PARENT_OF (mfs_t, tx, mfs));
+ blk_release (mfs);
+ }
+ }
+ for (i = 0; i < NB_PEER; i++)
+ {
+ bool ok = mac_store_sta_remove (store, PEER_MIN + i);
+ dbg_assert (ok);
+ }
+ ca_uninit (ca);
+ phy_uninit (phy);
+ mac_store_uninit (store);
+}
+
+/**
+ * Test TDMA poll.
+ * \param t test context
+ * \param sta_mask mask of present stations
+ * \param rx_mask mask of activated stations
+ * \param tx_mask mask of MFS with content
+ * \param ... list of polled STA TEI (uint), stop with 0
+ */
+void
+access_tdma_poll_test (test_t t, u64 sta_mask, u64 rx_mask, u64 tx_mask, ...)
+{
+ test_within (t);
+ int i;
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ /* Initialise contexts. */
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ phy->date = 0;
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ ca = ca_init (phy, &config, store, 0);
+ /* Prepare schedule. */
+ ca_schedule_t *sched = ca_alloc_get_schedule (ca, 0);
+ sched->coexistence_mode = MAC_COEXISTENCE_AV_ONLY_MODE;
+ sched->snid = 5;
+ sched->nek_switch = 0;
+ sched->allocations_nb = 1;
+ sched->allocations[0].end_offset_tck = 1000000;
+ sched->allocations[0].glid = MAC_LID_CFPI;
+ /* Prepare beacon period. */
+ ca_beacon_period_t bps[1] = { { .start_date = 0, .schedule_index = 0 } };
+ ca_alloc_update_beacon_periods (ca, bps, COUNT (bps));
+ /* Prepare STA. */
+ bool added;
+ u64 bit;
+ for (i = 0, bit = 1; i < 64; i++, bit <<= 1)
+ {
+ bool bcast = i == 0;
+ uint tei = bcast ? MAC_TEI_BCAST : i;
+ uint lid = bcast ? MAC_PLID_MIN : MAC_LLID_MIN;
+ if (sta_mask & bit)
+ {
+ mfs_tx_t *mfs = mac_store_mfs_add_tx (
+ store, bcast, false, lid, tei, &added);
+ dbg_assert (added);
+ if (tx_mask & bit)
+ mfs->seg_nb = 1;
+ ca_mfs_add (ca, mfs);
+ blk_release (mfs);
+ if (rx_mask & bit)
+ {
+ sta_t *sta = mac_store_sta_get (store, i);
+ sta->sppb = 1;
+ ca_sta_update (ca, sta);
+ blk_release (sta);
+ }
+ }
+ }
+ /* Compute schedule and check result. */
+ ca_access_activate (ca, 0);
+ ca_access_reprogram (ca, 0, 25000, MAC_TEI_UNASSOCIATED);
+ va_list ap;
+ uint expected_tei;
+ va_start (ap, tx_mask);
+ for (i = 0; (expected_tei = va_arg (ap, uint)); i++)
+ {
+ mfs_tx_t *the_mfs;
+ the_mfs = ca_access_get_mfs (ca);
+ test_fail_unless (
+ the_mfs->common.tei == expected_tei,
+ "unexpected tei at iteration %d: got %d, expected %d", i,
+ the_mfs->common.tei, expected_tei);
+ ca_access_done (ca);
+ }
+ va_end (ap);
+ /* Remove MFS and STA. */
+ for (i = 0, bit = 1; i < 64; i++, bit <<= 1)
+ {
+ bool bcast = i == 0;
+ uint tei = bcast ? MAC_TEI_BCAST : i;
+ uint lid = bcast ? MAC_PLID_MIN : MAC_LLID_MIN;
+ if (sta_mask & bit)
+ {
+ mfs_t *mfs = mac_store_mfs_get (
+ store, true, bcast, false, lid, tei);
+ mac_store_mfs_remove (store, mfs);
+ if (!bcast)
+ dbg_check (mac_store_sta_remove (store, tei));
+ blk_release (mfs);
+ }
+ }
+ /* Cleanup. */
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ phy_uninit (phy);
+ mac_store_uninit (store);
+ dbg_assert (blk_check_memory ());
+}
+
+void
+access_tdma_poll_test_case (test_t t)
+{
+ test_case_begin (t, "tdma poll");
+ test_begin (t, "no sta")
+ {
+ access_tdma_poll_test (t, 1, 0, 0, MAC_TEI_BCAST, 0);
+ } test_end;
+ test_begin (t, "no data")
+ {
+ access_tdma_poll_test (t, 0x330ull, 0x300ull, 0ull,
+ 8, 9, /* RX poll */
+ 4, 5, 8, 9, /* Empty poll. */
+ 4, 5, /* Empty poll... */
+ 0);
+ } test_end;
+ test_begin (t, "data")
+ {
+ access_tdma_poll_test (t, 0x331ull, 0x000ull, 0x31ull,
+ 4, 5, MAC_TEI_BCAST, /* TX poll. */
+ 4, 5, MAC_TEI_BCAST, /* Again... */
+ 0);
+ } test_end;
+ test_begin (t, "data rx tx")
+ {
+ access_tdma_poll_test (t, 0x11111ull, 0x01010ull, 0x10101ull,
+ 4, 8, 12, 16, MAC_TEI_BCAST, /* RX & TX poll. */
+ 8, 16, MAC_TEI_BCAST, /* TX poll only. */
+ 0);
+ } test_end;
+}
+
+/** TDMA test result, which MFS should be selected. */
+enum access_tdma_polled_test_result_t
+{
+ ACCESS_TDMA_TEST_BCAST_MME,
+ ACCESS_TDMA_TEST_BCAST_DATA,
+ ACCESS_TDMA_TEST_MME,
+ ACCESS_TDMA_TEST_DATA0,
+ ACCESS_TDMA_TEST_DATA1,
+ ACCESS_TDMA_TEST_DATA2,
+ ACCESS_TDMA_TEST_DATA3,
+ ACCESS_TDMA_TEST_NULL,
+};
+
+/** TDMA test MFS configuration. */
+struct access_tdma_polled_test_mfs_t
+{
+ /** MFS exists. */
+ bool present;
+ /** Number of segments. */
+ int seg_nb;
+};
+
+/** TDMA test configuration. */
+struct access_tdma_polled_test_t
+{
+ /** Result of the test. */
+ enum access_tdma_polled_test_result_t result;
+ /** Schedule TEI. */
+ uint tei;
+ /** Repeat scheduling to test deficit. */
+ uint repeat;
+ /** MFS test configuration. */
+ struct access_tdma_polled_test_mfs_t mfs[ACCESS_TDMA_TEST_NULL];
+};
+
+void
+access_tdma_polled_test (test_t t,
+ const struct access_tdma_polled_test_t *conf)
+{
+ test_within (t);
+ int i;
+ const uint sta_tei = 5;
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ mfs_tx_t *mfs[ACCESS_TDMA_TEST_NULL];
+ /* Check configuration. */
+ dbg_assert (conf->tei == MAC_TEI_BCAST || conf->tei == sta_tei);
+ /* Initialise contexts. */
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ phy->date = 0;
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ ca = ca_init (phy, &config, store, 0);
+ /* Prepare schedule. */
+ ca_schedule_t *sched = ca_alloc_get_schedule (ca, 0);
+ sched->coexistence_mode = MAC_COEXISTENCE_AV_ONLY_MODE;
+ sched->snid = 5;
+ sched->nek_switch = 0;
+ sched->allocations_nb = 1;
+ sched->allocations[0].end_offset_tck = 1000000;
+ sched->allocations[0].glid = MAC_LID_CFPI;
+ /* Prepare beacon period. */
+ ca_beacon_period_t bps[1] = { { .start_date = 0, .schedule_index = 0 } };
+ ca_alloc_update_beacon_periods (ca, bps, COUNT (bps));
+ /* Prepare MFS. */
+ struct {
+ bool bcast;
+ bool mme;
+ uint lid;
+ uint tei;
+ } mfs_param[ACCESS_TDMA_TEST_NULL] = {
+ { true, true, MAC_LID_NONE, MAC_TEI_BCAST },
+ { true, false, MAC_PLID_MIN, MAC_TEI_BCAST },
+ { false, true, MAC_LID_NONE, sta_tei },
+ { false, false, MAC_LLID_MIN + 0, sta_tei },
+ { false, false, MAC_LLID_MIN + 1, sta_tei },
+ { false, false, MAC_LLID_MIN + 2, sta_tei },
+ { false, false, MAC_LLID_MIN + 3, sta_tei },
+ };
+ bool sta_added = false;
+ bool added;
+ for (i = 0; i < ACCESS_TDMA_TEST_NULL; i++)
+ {
+ if (conf->mfs[i].present)
+ {
+ mfs[i] = mac_store_mfs_add_tx (
+ store, mfs_param[i].bcast, mfs_param[i].mme, mfs_param[i].lid,
+ mfs_param[i].tei, &added);
+ if (mfs_param[i].tei != MAC_TEI_BCAST)
+ sta_added = true;
+ dbg_assert (added);
+ mfs[i]->seg_nb = conf->mfs[i].seg_nb;
+ ca_mfs_add (ca, mfs[i]);
+ }
+ else
+ mfs[i] = 0;
+ }
+ /* Compute schedule and check result. */
+ ca_access_activate (ca, 0);
+ ca_access_reprogram (ca, 0, 25000, conf->tei);
+ int repeat = conf->repeat;
+ mfs_tx_t *the_mfs;
+ the_mfs = ca_access_get_mfs (ca);
+ while (repeat--)
+ {
+ ca_access_done (ca);
+ the_mfs = ca_access_get_mfs (ca);
+ }
+ if (the_mfs)
+ {
+ test_fail_unless (conf->result != ACCESS_TDMA_TEST_NULL);
+ test_fail_unless (the_mfs == mfs[conf->result]);
+ }
+ else
+ test_fail_unless (conf->result == ACCESS_TDMA_TEST_NULL);
+ ca_access_done (ca);
+ /* Remove MFS. */
+ for (i = 0; i < ACCESS_TDMA_TEST_NULL; i++)
+ {
+ if (mfs[i])
+ {
+ ca_mfs_remove (ca, mfs[i]);
+ mac_store_mfs_remove (store, PARENT_OF (mfs_t, tx, mfs[i]));
+ blk_release (mfs[i]);
+ }
+ }
+ if (sta_added)
+ {
+ bool ok = mac_store_sta_remove (store, sta_tei);
+ dbg_assert (ok);
+ }
+ /* Cleanup. */
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ phy_uninit (phy);
+ mac_store_uninit (store);
+ dbg_assert (blk_check_memory ());
+}
+
+void
+access_tdma_polled_test_case (test_t t)
+{
+ test_case_begin (t, "tdma polled");
+ test_begin (t, "bcast null")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_NULL,
+ .tei = MAC_TEI_BCAST,
+ .mfs = {
+ { .present = false },
+ { .present = false },
+ { .present = false },
+ { .present = false },
+ },
+ });
+ } test_end;
+ test_begin (t, "bcast no seg")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_BCAST_DATA,
+ .tei = MAC_TEI_BCAST,
+ .mfs = {
+ { .present = true },
+ { .present = true },
+ { .present = false },
+ { .present = false },
+ },
+ });
+ } test_end;
+ test_begin (t, "bcast mme")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_BCAST_MME,
+ .tei = MAC_TEI_BCAST,
+ .mfs = {
+ { .present = true, .seg_nb = 1 },
+ { .present = true, .seg_nb = 0 },
+ { .present = false },
+ { .present = false },
+ },
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_BCAST_MME,
+ .tei = MAC_TEI_BCAST,
+ .mfs = {
+ { .present = true, .seg_nb = 1 },
+ { .present = true, .seg_nb = 1 },
+ { .present = false },
+ { .present = false },
+ },
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_BCAST_MME,
+ .tei = 5,
+ .mfs = {
+ { .present = true, .seg_nb = 1 },
+ { .present = false, .seg_nb = 1 },
+ { .present = true, .seg_nb = 1 },
+ { .present = true, .seg_nb = 1 },
+ },
+ });
+ } test_end;
+ test_begin (t, "bcast data")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_BCAST_DATA,
+ .tei = MAC_TEI_BCAST,
+ .mfs = {
+ { .present = true, .seg_nb = 0 },
+ { .present = true, .seg_nb = 1 },
+ { .present = false },
+ { .present = false },
+ },
+ });
+ } test_end;
+ test_begin (t, "null")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_NULL,
+ .tei = 5,
+ .mfs = {
+ { .present = false },
+ { .present = false },
+ { .present = false },
+ { .present = false },
+ },
+ });
+ } test_end;
+ test_begin (t, "no seg")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_BCAST_MME,
+ .tei = 5,
+ .mfs = {
+ { .present = true, .seg_nb = 1 },
+ { .present = false },
+ { .present = true },
+ { .present = true },
+ },
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA0,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true },
+ },
+ });
+ } test_end;
+ test_begin (t, "mme")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_MME,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true, .seg_nb = 1 },
+ { .present = true },
+ },
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_MME,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true, .seg_nb = 1 },
+ { .present = true, .seg_nb = 1 },
+ },
+ });
+ } test_end;
+ test_begin (t, "data")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA0,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 1 },
+ },
+ });
+ } test_end;
+ test_begin (t, "data hi prio")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA2, .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 1 },
+ { .present = true },
+ { .present = true, .seg_nb = 1 },
+ { .present = true },
+ },
+ });
+ } test_end;
+ test_begin (t, "data low prio")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA0,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 1 },
+ { .present = true },
+ { .present = true },
+ { .present = true },
+ },
+ });
+ } test_end;
+ test_begin (t, "data hi defer")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA0,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 2 },
+ { .present = true },
+ { .present = true, .seg_nb = 1 },
+ { .present = true },
+ },
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA1,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 2 },
+ { .present = true, .seg_nb = 100 },
+ { .present = true, .seg_nb = 1 },
+ { .present = true },
+ },
+ });
+ } test_end;
+ test_begin (t, "data dwrr")
+ {
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA0,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 100 },
+ { .present = true },
+ { .present = true, .seg_nb = 10 },
+ { .present = true },
+ },
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA0,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 100 },
+ { .present = true },
+ { .present = true, .seg_nb = 10 },
+ { .present = true },
+ },
+ .repeat = 2,
+ });
+ access_tdma_polled_test (t, &(struct access_tdma_polled_test_t) {
+ .result = ACCESS_TDMA_TEST_DATA2,
+ .tei = 5,
+ .mfs = {
+ { .present = true },
+ { .present = false },
+ { .present = true },
+ { .present = true, .seg_nb = 100 },
+ { .present = true },
+ { .present = true, .seg_nb = 10 },
+ { .present = true },
+ },
+ .repeat = 5,
+ });
+ } test_end;
+}
+
+void
+access_test_suite (test_t t)
+{
+ test_suite_begin (t, "access");
+ access_basic_test_case (t);
+ access_tdma_poll_test_case (t);
+ access_tdma_polled_test_case (t);
+}
+
diff --git a/cesar/mac/ca/test/ca_eoc/src/test_alloc.c b/cesar/mac/ca/test/ca_eoc/src/test_alloc.c
new file mode 100644
index 0000000000..91aaa6f61c
--- /dev/null
+++ b/cesar/mac/ca/test/ca_eoc/src/test_alloc.c
@@ -0,0 +1,444 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file src/test_alloc.c
+ * \brief Test allocations code.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/access.h"
+#include "mac/ca/inc/alloc.h"
+#include "mac/common/timings.h"
+
+#include "hal/phy/phy.h"
+#include "inc/phy_stub.h"
+
+#include "lib/test.h"
+
+#define NB_ITER 100000
+
+/**
+ * Test basic CA allocation services with randomised input.
+ * \param  t  test context
+ *
+ * Covers beacon period bookkeeping (ca_alloc_update_beacon_periods,
+ * ca_alloc_get_schedule, ca_alloc_find_beacon_period) and allocation
+ * lookup inside a schedule (ca_alloc_find).
+ */
+void
+alloc_basic_test_case (test_t t)
+{
+ uint i, j;
+ lib_rnd_t rnd[1];
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ test_case_begin (t, "basic");
+ /* Initialise. */
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ ca = ca_init (phy, &config, store, 0);
+ /* Fixed seed: randomised but reproducible runs. */
+ lib_rnd_init (rnd, 1234);
+/* NOTE(review): "is hybrid" sub-test below is disabled, presumably
+ * because CA_ALLOC_IS_HYBRID is not available in the EoC build —
+ * confirm and either re-enable or remove. */
+/*
+ test_begin (t, "is hybrid")
+ {
+ struct
+ {
+ u8 lid;
+ mac_coexistence_mode_t coex;
+ bool expect;
+ } is_hybrid_tab[] = {
+ { 0x80, MAC_COEXISTENCE_AV_ONLY_MODE, false },
+ { 0x8a, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, false },
+ { 0xa2, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { 0xcd, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_AV_ONLY_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_AV_ONLY_MODE, false },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_AV_ONLY_MODE, false },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, false },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_AV_ONLY_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_AV_ONLY_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ // { MAC_LID_CFPI, ? },
+ };
+ for (i = 0; i < COUNT (is_hybrid_tab); i++)
+ {
+ test_fail_unless (
+ CA_ALLOC_IS_HYBRID (is_hybrid_tab[i].coex,
+ is_hybrid_tab[i].lid) ==
+ is_hybrid_tab[i].expect, "is hybrid mismatch i=%d", i);
+ }
+ } test_end;
+*/
+ test_begin (t, "beacon periods")
+ {
+ /* 50 beacon periods per second (AC line cycle based). */
+ const int beacon_period_length = MAC_MS_TO_TCK (1000 / 50);
+ ca_beacon_period_t periods[CA_BEACON_PERIOD_NB];
+ uint periods_nb = 0;
+ uint periods_nb_new;
+ uint used;
+ /* Create schedule to satisfy asserts. */
+ for (i = 0; i < CA_SCHEDULE_NB; i++)
+ {
+ ca_schedule_t *sched = ca_alloc_get_schedule (ca, i);
+ sched->coexistence_mode = MAC_COEXISTENCE_AV_ONLY_MODE;
+ sched->snid = 5;
+ sched->nek_switch = 0;
+ sched->allocations_nb = 1;
+ sched->allocations[0].end_offset_tck = beacon_period_length;
+ sched->allocations[0].glid = MAC_LID_SPC_HOLE;
+ }
+ /* Create beacon periods. */
+ for (i = 0; i < NB_ITER; i++)
+ {
+ /* Place current date one third into the current period. */
+ phy->date = i * beacon_period_length + beacon_period_length / 3;
+ periods_nb_new =
+ lib_rnd_uniform (rnd, CA_BEACON_PERIOD_NB - 2) + 1;
+ for (j = 0; j < periods_nb_new; j++)
+ {
+ /* Change older periods, set new periods. */
+ if (j + 1 >= periods_nb
+ || lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.3)))
+ {
+ /* New start date with a small random jitter around
+ * the nominal period boundary. */
+ periods[j].start_date = (i + j) * beacon_period_length
+ + lib_rnd_uniform (rnd, beacon_period_length / 100)
+ - beacon_period_length / 200;
+ periods[j].schedule_index =
+ lib_rnd_uniform (rnd, CA_SCHEDULE_NB);
+ }
+ else
+ {
+ /* Slide the queue: reuse the next older period. */
+ periods[j] = periods[j + 1];
+ }
+ }
+ periods_nb = periods_nb_new;
+ /* Update CA periods. */
+ ca_alloc_update_beacon_periods (ca, periods, periods_nb);
+ /* Check the update. */
+ test_fail_unless (periods_nb == ca->beacon_periods_nb);
+ for (j = 0; j < periods_nb; j++)
+ {
+ test_fail_unless ((ca->beacon_periods[j].start_date
+ == periods[j].start_date)
+ && (ca->beacon_periods[j].schedule_index
+ == periods[j].schedule_index));
+ }
+ /* Test get_schedule: requesting a schedule referenced by a
+ * programmed beacon period must trigger a fatal assert. */
+ used = 0;
+ for (j = 0; j < periods_nb; j++)
+ {
+ dbg_assert (periods[j].schedule_index < CA_SCHEDULE_NB);
+ used |= 1 << periods[j].schedule_index;
+ }
+ for (j = 0; used; j++, used >>= 1)
+ {
+ bool caught = false;
+ dbg_fatal_try_begin
+ {
+ ca_alloc_get_schedule (ca, j);
+ }
+ dbg_fatal_try_catch_void ()
+ {
+ caught = true;
+ }
+ dbg_fatal_try_end;
+ test_fail_unless (((used & 1) && caught)
+ || (!(used & 1) && !caught));
+ }
+ /* Test find_beacon_period: both at the exact start date and
+ * inside the period. */
+ for (j = 0; j < periods_nb; j++)
+ {
+ test_fail_unless (ca_alloc_find_beacon_period (
+ ca, periods[j].start_date) == &ca->beacon_periods[j]);
+ test_fail_unless (ca_alloc_find_beacon_period (
+ ca, periods[j].start_date + beacon_period_length / 3)
+ == &ca->beacon_periods[j]);
+ }
+ }
+ } test_end;
+ test_begin (t, "alloc find")
+ {
+ ca_schedule_t schedule;
+ uint end;
+ for (i = 0; i < NB_ITER / CA_SCHEDULE_SIZE; i++)
+ {
+ schedule.coexistence_mode = MAC_COEXISTENCE_AV_ONLY_MODE;
+ schedule.snid = 5;
+ schedule.nek_switch = 0;
+ schedule.allocations_nb = lib_rnd_uniform (rnd, CA_SCHEDULE_SIZE);
+ end = 0;
+ /* Build strictly increasing end offsets (step >= 2) so that
+ * end, end + 1 and the boundary are all distinguishable,
+ * keeping everything below the 2^24 tick horizon. */
+ for (j = 0; j < schedule.allocations_nb; j++)
+ {
+ end = end + 2 + lib_rnd_uniform (
+ rnd, (1 << 24)
+ - (schedule.allocations_nb - j) * 2
+ - end - 2);
+ schedule.allocations[j].end_offset_tck = end;
+ schedule.allocations[j].glid = 0;
+ }
+ end = 0;
+ /* An allocation covers [previous end, end): the end offset
+ * itself belongs to the next allocation. */
+ for (j = 0; j < schedule.allocations_nb; j++)
+ {
+ test_fail_unless (ca_alloc_find (&schedule, end) == j);
+ test_fail_unless (ca_alloc_find (&schedule, end + 1) == j);
+ end = schedule.allocations[j].end_offset_tck;
+ test_fail_unless (ca_alloc_find (&schedule, end) == j + 1);
+ test_fail_unless (ca_alloc_find (&schedule, end + 1)
+ == j + 1);
+ }
+ }
+ } test_end;
+ /* Uninitialise. */
+ ca_uninit (ca);
+ phy_uninit (phy);
+ mac_store_uninit (store);
+}
+
+/**
+ * Helper function to prepare schedules and program them into CA.
+ * \param  ca  CA context
+ *
+ * The schedules are described with variable arguments, terminated by a
+ * beacon period with zero allocations:
+ *
+ * arguments: beacon_period* 0
+ * beacon_period: allocations_nb start_date coexistence_mode snid nek_switch
+ * allocations+
+ * allocations: end_offset_tck glid
+ *
+ * Beacon period i uses schedule index i.
+ * NOTE(review): no bound check against CA_BEACON_PERIOD_NB; passing more
+ * beacon periods than that overruns the local array — acceptable for a
+ * test helper with hand-written call sites, but confirm.
+ */
+static void
+alloc_prepare_schedules (ca_t *ca, ...)
+{
+ ca_beacon_period_t periods[CA_BEACON_PERIOD_NB];
+ ca_schedule_t *sched = NULL;
+ uint allocs_nb = 0;
+ uint period_index = 0;
+ uint alloc_index = 0;
+ va_list ap;
+ va_start (ap, ca);
+ do
+ {
+ /* Fetch next allocation number for next beacon period. */
+ if (alloc_index == 0)
+ allocs_nb = va_arg (ap, uint);
+ if (allocs_nb)
+ {
+ /* If first allocation initialise schedule. */
+ if (alloc_index == 0)
+ {
+ periods[period_index].start_date = va_arg (ap, u32);
+ periods[period_index].schedule_index = period_index;
+ sched = ca_alloc_get_schedule (ca, period_index);
+ sched->coexistence_mode = va_arg (ap, uint);
+ sched->snid = va_arg (ap, uint);
+ sched->nek_switch = va_arg (ap, uint);
+ sched->allocations_nb = 0;
+ }
+ /* Set allocation from parameters. */
+ sched->allocations[alloc_index].end_offset_tck =
+ va_arg (ap, uint);
+ sched->allocations[alloc_index].glid = va_arg (ap, uint);
+ sched->allocations_nb++;
+ alloc_index++;
+ /* Next beacon period? */
+ if (alloc_index == allocs_nb)
+ {
+ period_index++;
+ alloc_index = 0;
+ }
+ }
+ } while (allocs_nb);
+ va_end (ap);
+ /* Program CA. */
+ ca_alloc_update_beacon_periods (ca, periods, period_index);
+}
+
+/**
+ * Test computation of the current allocation parameters on activation.
+ * \param  t  test context
+ *
+ * Each sub-test programs two beacon periods via alloc_prepare_schedules,
+ * activates access at date 0 and checks ca->current_allocation_param:
+ * allocation end shrinking against the next beacon period, the larger
+ * B2BIFS guard before a central beacon, and CSMA allocation merging
+ * across the beacon period boundary.
+ */
+static void
+alloc_prepare_test_case (test_t t)
+{
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ test_case_begin (t, "prepare");
+ /* Initialise. */
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ /* Allocation ends exactly at the next beacon period: no shrink. */
+ test_begin (t, "no shrink")
+ {
+ ca = ca_init (phy, &config, store, 0);
+ alloc_prepare_schedules (
+ ca,
+ 1, 0, MAC_COEXISTENCE_AV_ONLY_MODE, 1, 0,
+ 1000000, MAC_GLID_MIN + 10,
+ 1, 1000000, MAC_COEXISTENCE_AV_ONLY_MODE, 1, 0,
+ 1000000, MAC_GLID_MIN + 11,
+ 0);
+ ca_access_activate (ca, 0);
+ ca_alloc_param_t *ap = &ca->current_allocation_param;
+ test_fail_unless (ap->coexistence_mode
+ == MAC_COEXISTENCE_AV_ONLY_MODE);
+ test_fail_unless (ap->snid == 1);
+ test_fail_unless (!ap->hybrid);
+ test_fail_unless (!ap->merge);
+ test_fail_unless (ap->nek_switch == 0);
+ test_fail_unless (ap->end_date == 1000000 - MAC_AIFS_TCK);
+ test_fail_unless (ap->aifs_date == 1000000);
+ test_fail_unless (ap->beacon_period_start_date == 0);
+ test_fail_unless (ap->glid == MAC_GLID_MIN + 10);
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ } test_end;
+ /* Next beacon period starts before the allocation end: the
+ * allocation must be shrunk to the period boundary. */
+ test_begin (t, "shrink")
+ {
+ ca = ca_init (phy, &config, store, 0);
+ alloc_prepare_schedules (
+ ca,
+ 1, 0, MAC_COEXISTENCE_AV_ONLY_MODE, 1, 0,
+ 1000000, MAC_GLID_MIN + 10,
+ 1, 900000, MAC_COEXISTENCE_AV_ONLY_MODE, 1, 0,
+ 1000000, MAC_GLID_MIN + 11,
+ 0);
+ ca_access_activate (ca, 0);
+ ca_alloc_param_t *ap = &ca->current_allocation_param;
+ test_fail_unless (ap->coexistence_mode
+ == MAC_COEXISTENCE_AV_ONLY_MODE);
+ test_fail_unless (ap->snid == 1);
+ test_fail_unless (!ap->hybrid);
+ test_fail_unless (!ap->merge);
+ test_fail_unless (ap->nek_switch == 0);
+ test_fail_unless (ap->end_date == 900000 - MAC_AIFS_TCK);
+ test_fail_unless (ap->aifs_date == 900000);
+ test_fail_unless (ap->beacon_period_start_date == 0);
+ test_fail_unless (ap->glid == MAC_GLID_MIN + 10);
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ } test_end;
+ /* Next period starts with a central beacon: the larger B2BIFS
+ * guard is used instead of AIFS. */
+ test_begin (t, "shrink beacon")
+ {
+ ca = ca_init (phy, &config, store, 0);
+ alloc_prepare_schedules (
+ ca,
+ 1, 0, MAC_COEXISTENCE_AV_ONLY_MODE, 1, 0,
+ 1000000, MAC_GLID_MIN + 10,
+ 1, 900000, MAC_COEXISTENCE_AV_ONLY_MODE, 1, 0,
+ 1000000, MAC_LID_SPC_CENTRAL,
+ 0);
+ ca_access_activate (ca, 0);
+ ca_alloc_param_t *ap = &ca->current_allocation_param;
+ test_fail_unless (ap->coexistence_mode
+ == MAC_COEXISTENCE_AV_ONLY_MODE);
+ test_fail_unless (ap->snid == 1);
+ test_fail_unless (!ap->hybrid);
+ test_fail_unless (!ap->merge);
+ test_fail_unless (ap->nek_switch == 0);
+ test_fail_unless (ap->end_date == 900000 - MAC_B2BIFS_TCK);
+ test_fail_unless (ap->aifs_date == 900000);
+ test_fail_unless (ap->beacon_period_start_date == 0);
+ test_fail_unless (ap->glid == MAC_GLID_MIN + 10);
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ } test_end;
+ /* Same shared CSMA allocation on both sides of the boundary:
+ * access may run past the end by the merge margin. */
+ test_begin (t, "merge")
+ {
+ ca = ca_init (phy, &config, store, 0);
+ alloc_prepare_schedules (
+ ca,
+ 1, 0, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, 1, 0,
+ 1000000, MAC_LID_SHARED_CSMA,
+ 1, 1000000, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, 1, 0,
+ 1000000, MAC_LID_SHARED_CSMA,
+ 0);
+ ca_access_activate (ca, 0);
+ ca_alloc_param_t *ap = &ca->current_allocation_param;
+ test_fail_unless (ap->coexistence_mode
+ == MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE);
+ test_fail_unless (ap->snid == 1);
+ test_fail_unless (ap->merge);
+ test_fail_unless (ap->nek_switch == 0);
+ test_fail_unless (ap->end_date == 1000000 - MAC_AIFS_TCK
+ + CA_ACCESS_MERGE_MARGIN_TCK);
+ test_fail_unless (ap->aifs_date == 1000000);
+ test_fail_unless (ap->beacon_period_start_date == 0);
+ test_fail_unless (ap->glid == MAC_LID_SHARED_CSMA);
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ } test_end;
+ /* No merge: current allocation is not the last of its period. */
+ test_begin (t, "no merge, not last")
+ {
+ ca = ca_init (phy, &config, store, 0);
+ alloc_prepare_schedules (
+ ca,
+ 2, 0, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, 1, 0,
+ 500000, MAC_LID_SHARED_CSMA,
+ 1000000, MAC_LID_SHARED_CSMA,
+ 1, 1000000, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, 1, 0,
+ 1000000, MAC_LID_SHARED_CSMA,
+ 0);
+ ca_access_activate (ca, 0);
+ ca_alloc_param_t *ap = &ca->current_allocation_param;
+ test_fail_unless (ap->coexistence_mode
+ == MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE);
+ test_fail_unless (ap->snid == 1);
+ test_fail_unless (!ap->merge);
+ test_fail_unless (ap->nek_switch == 0);
+ test_fail_unless (ap->end_date == 500000 - MAC_AIFS_TCK);
+ test_fail_unless (ap->aifs_date == 500000);
+ test_fail_unless (ap->beacon_period_start_date == 0);
+ test_fail_unless (ap->glid == MAC_LID_SHARED_CSMA);
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ } test_end;
+ /* No merge: next period does not start with a CSMA allocation. */
+ test_begin (t, "no merge, not csma")
+ {
+ ca = ca_init (phy, &config, store, 0);
+ alloc_prepare_schedules (
+ ca,
+ 1, 0, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, 1, 0,
+ 1000000, MAC_LID_SHARED_CSMA,
+ 1, 1000000, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, 1, 0,
+ 1000000, MAC_LID_SPC_PROXY,
+ 0);
+ ca_access_activate (ca, 0);
+ ca_alloc_param_t *ap = &ca->current_allocation_param;
+ test_fail_unless (ap->coexistence_mode
+ == MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE);
+ test_fail_unless (ap->snid == 1);
+ test_fail_unless (!ap->merge);
+ test_fail_unless (ap->nek_switch == 0);
+ test_fail_unless (ap->end_date == 1000000 - MAC_AIFS_TCK);
+ test_fail_unless (ap->aifs_date == 1000000);
+ test_fail_unless (ap->beacon_period_start_date == 0);
+ test_fail_unless (ap->glid == MAC_LID_SHARED_CSMA);
+ ca_access_deactivate (ca);
+ ca_uninit (ca);
+ } test_end;
+ /* Uninitialise. */
+ phy_uninit (phy);
+ mac_store_uninit (store);
+}
+
+/**
+ * Run the allocation test suite.
+ * \param  t  test context
+ *
+ * Chains the basic allocation bookkeeping tests and the allocation
+ * parameter preparation tests.
+ */
+void
+alloc_test_suite (test_t t)
+{
+ test_suite_begin (t, "alloc");
+ alloc_basic_test_case (t);
+ alloc_prepare_test_case (t);
+}
+
diff --git a/cesar/mac/ca/test/ca_eoc/src/test_ca.c b/cesar/mac/ca/test/ca_eoc/src/test_ca.c
new file mode 100644
index 0000000000..ec9b6724ba
--- /dev/null
+++ b/cesar/mac/ca/test/ca_eoc/src/test_ca.c
@@ -0,0 +1,39 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file src/test_ca.c
+ * \brief Test Channel Access.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "lib/test.h"
+#include "lib/trace.h"
+
+void
+backoff_test_suite (test_t t);
+
+void
+alloc_test_suite (test_t t);
+
+void
+access_test_suite (test_t t);
+
+/**
+ * Test program entry point: run the backoff, alloc and access suites.
+ * \param  argc  argument count, forwarded to the test harness
+ * \param  argv  argument vector, forwarded to the test harness
+ * \return  0 when every test passed, 1 otherwise
+ */
+int
+main (int argc, char **argv)
+{
+ test_t t;
+ trace_init ();
+ test_init (t, argc, argv);
+ backoff_test_suite (t);
+ alloc_test_suite (t);
+ access_test_suite (t);
+ trace_uninit ();
+ test_result (t);
+ return test_nb_failed (t) == 0 ? 0 : 1;
+}