summaryrefslogtreecommitdiff
path: root/cesar/mac/ca
diff options
context:
space:
mode:
authorsave2008-04-07 14:17:42 +0000
committersave2008-04-07 14:17:42 +0000
commit3d58a62727346b7ac1a6cb36fed1a06ed72228dd (patch)
treed7788c3cf9f76426aef0286d0202e2097f0fa0eb /cesar/mac/ca
parent095dca4b0a8d4924093bab424f71f588fdd84613 (diff)
Moved the complete svn base into the cesar directory.
git-svn-id: svn+ssh://pessac/svn/cesar/trunk@1769 017c9cb6-072f-447c-8318-d5b54f68fe89
Diffstat (limited to 'cesar/mac/ca')
-rw-r--r--cesar/mac/ca/Module4
-rw-r--r--cesar/mac/ca/ca.h371
-rw-r--r--cesar/mac/ca/inc/access.h28
-rw-r--r--cesar/mac/ca/inc/alloc.h65
-rw-r--r--cesar/mac/ca/inc/backoff.h63
-rw-r--r--cesar/mac/ca/inc/context.h83
-rw-r--r--cesar/mac/ca/inc/mfs.h27
-rw-r--r--cesar/mac/ca/inc/trace.h77
-rw-r--r--cesar/mac/ca/mfs.h26
-rw-r--r--cesar/mac/ca/src/access.c444
-rw-r--r--cesar/mac/ca/src/alloc.c139
-rw-r--r--cesar/mac/ca/src/backoff.c110
-rw-r--r--cesar/mac/ca/src/ca.c190
-rw-r--r--cesar/mac/ca/src/trace.c68
-rw-r--r--cesar/mac/ca/test/ca/Config2
-rw-r--r--cesar/mac/ca/test/ca/Makefile8
-rw-r--r--cesar/mac/ca/test/ca/inc/phy_stub.h23
-rw-r--r--cesar/mac/ca/test/ca/src/phy_stub.c67
-rw-r--r--cesar/mac/ca/test/ca/src/test_access.c375
-rw-r--r--cesar/mac/ca/test/ca/src/test_alloc.c218
-rw-r--r--cesar/mac/ca/test/ca/src/test_backoff.c129
-rw-r--r--cesar/mac/ca/test/ca/src/test_ca.c39
22 files changed, 2556 insertions, 0 deletions
diff --git a/cesar/mac/ca/Module b/cesar/mac/ca/Module
new file mode 100644
index 0000000000..f6778dd579
--- /dev/null
+++ b/cesar/mac/ca/Module
@@ -0,0 +1,4 @@
+SOURCES := access.c alloc.c backoff.c ca.c
+ifeq ($(CONFIG_TRACE),y)
+SOURCES += trace.c
+endif
diff --git a/cesar/mac/ca/ca.h b/cesar/mac/ca/ca.h
new file mode 100644
index 0000000000..38fec6d2f0
--- /dev/null
+++ b/cesar/mac/ca/ca.h
@@ -0,0 +1,371 @@
+#ifndef mac_ca_ca_h
+#define mac_ca_ca_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/ca.h
+ * \brief Channel Access public interface.
+ * \ingroup mac_ca
+ */
+#include "mac/common/mfs.h"
+#include "hal/phy/forward.h"
+#include "mac/common/config.h"
+#include "mac/common/store.h"
+
+/* Forward declaration. */
+typedef struct ca_t ca_t;
+
+/** Number of schedules in the schedule table.
+ * At a given time, we can store:
+ * - the schedule for the last beacon period which was not released yet;
+ * - the schedule for the current beacon period, created by mixing the
+ * current persistent schedule with the non-persistent one;
+ * - the schedule for the future beacon period using the current persistent
+ * schedule for which we have no non-persistent schedule;
+ * - the persistent preview schedule, also without non-persistent schedule.
+ *
+ * Moreover, to update the schedules without altering the used ones, the upper
+ * layer may need four other schedules.
+ */
+#define CA_SCHEDULE_NB 8
+
+/** Schedule maximum size.
+ * One transmitted schedule can have a maximum of 64 allocation entries, but
+ * Channel Access schedules are composed from:
+ * - a persistent schedule,
+ * - a non-persistent schedule,
+ * - optional holes between allocations.
+ *
+ * This is quite space consuming, and most of the time, far fewer entries
+ * will be used...
+ *
+ * Actually, a schedule cannot be that big, because of the limited beacon
+ * payload size. The beacon payload is 128 byte long. The session allocation
+ * information (SAI) which generates the most data in our implementation, a
+ * SAI with start time, is 4 byte long and generates two entries. Therefore,
+ * only 64 entries are needed.
+ */
+#define CA_SCHEDULE_SIZE 64
+
+/** Beacon period circular buffer size.
+ * Current schedule can be valid for 8 beacon periods. Preview schedule can
+ * be valid after 7 periods. An additional slot is required for the circular
+ * buffer operation. */
+#define CA_BEACON_PERIOD_NB 16
+
+/** Beacon period entry. */
+struct ca_beacon_period_t
+{
+ /** Beacon period start date. */
+ u32 start_date;
+ /** Schedule index in Channel Access table. */
+ uint schedule_index;
+};
+typedef struct ca_beacon_period_t ca_beacon_period_t;
+
+/** Channel Access schedule entry. */
+struct ca_allocation_t
+{
+ /** Allocation end date as an offset from the beacon period start. */
+ u32 end_offset_tck:24;
+ /** GLID of this allocation.
+ * - 0xff: local CSMA allocation,
+ * - 0xfe: shared CSMA allocation,
+ * - 0xfd: discover beacon allocation,
+ * - 0xfc: contention free period initialisation,
+ * - 0xf8-0xfb: reserved by the Homeplug standard,
+ * - 0x80-0xf7: GLID,
+ * - 0x00: defined by this layer: hole, unusable allocation. */
+ u32 glid:8;
+};
+typedef struct ca_allocation_t ca_allocation_t;
+
+/** Channel Access schedule.
+ * It should contains allocations from the persistent schedule, the
+ * non-persistent schedule and also entries for holes. Consecutive shared
+ * CSMA allocations should be merged in one allocation by the upper layer. */
+struct ca_schedule_t
+{
+ /** Coexistence mode for this schedule. */
+ mac_coexistence_mode_t coexistence_mode;
+ /** EKS/NEK pair to use. */
+ uint nek_switch;
+ /** Number of used allocations in this schedule. */
+ uint allocations_nb;
+ /** Table of allocations. */
+ ca_allocation_t allocations[CA_SCHEDULE_SIZE];
+};
+typedef struct ca_schedule_t ca_schedule_t;
+
+/** Channel Access parameters for next ACCESS event.
+ * This structure is read by PB Processing to prepare its new MPDU. */
+struct ca_access_param_t
+{
+ /** Scheduled MFS borrowed reference or NULL if none. If NULL, only
+ * access_date and cw_start_date are initialised. */
+ mfs_tx_t *mfs;
+ /** Programmed ACCESS date. Does not include anticipation, but does
+ * include backoff. */
+ u32 access_date;
+ /** Contention window start date. This is used to compute slot count. */
+ u32 cw_start_date;
+ /** Beacon period start. Should be used to locate a tonemap. */
+ uint beacon_period_start_date;
+ /** Available transmission time until the allocation end. */
+ uint duration_tck;
+ /** Does it occur during a contention free period? This flag is used to
+ * know if a frame might be transmitted right after this one and we have
+ * to hurry. */
+ bool cfp:1;
+ /** Use a hybrid frame control? */
+ bool hybrid:1;
+};
+typedef struct ca_access_param_t ca_access_param_t;
+
+/** Allocation parameters for current allocation.
+ * This structure is read by BP Processing to setup RX. */
+struct ca_access_alloc_param_t
+{
+ /** Coexistence mode. */
+ mac_coexistence_mode_t coexistence_mode;
+ /** Hybrid frame control. */
+ bool hybrid;
+ /** EKS/NEK pair to use. */
+ uint nek_switch;
+};
+typedef struct ca_access_alloc_param_t ca_access_alloc_param_t;
+
+BEGIN_DECLS
+
+/**
+ * Initialise ca and return its context.
+ * \param phy phy context
+ * \param config global mac configuration
+ * \param store MFS and STA store
+ * \return ca context
+ */
+ca_t *
+ca_init (phy_t *phy, mac_config_t *config, mac_store_t *store);
+
+/**
+ * Uninitialise a ca context.
+ * \param ctx ca context
+ */
+void
+ca_uninit (ca_t *ctx);
+
+/**
+ * Called on activation to find the currently used allocation.
+ * \param ctx ca context
+ * \param date current date
+ * \param anticipation_tck ACCESS event anticipation
+ * \return pointer to internal new allocation parameters
+ */
+const ca_access_alloc_param_t *
+ca_access_activate (ca_t *ctx, u32 date, uint anticipation_tck);
+
+/**
+ * Called on deactivation to cancel any programmed access.
+ * \param ctx ca context
+ */
+void
+ca_access_deactivate (ca_t *ctx);
+
+/**
+ * Temporarily disable any access update until an explicit access
+ * reprogramming is requested.
+ * \param ctx ca context
+ *
+ * This avoids reprogramming the access on an MFS update when an access
+ * change is about to follow.
+ */
+void
+ca_access_hold (ca_t *ctx);
+
+/**
+ * Restart VCS.
+ * \param ctx ca context
+ * \param start_date VCS start date
+ * \param length_tck VCS length
+ * \param anticipation_tck ACCESS event anticipation
+ * \param eifs true if this is an EIFS
+ *
+ * Restart the Virtual Carrier Sense timer for a given length. The ca layer
+ * will use its schedule to find the next transmit opportunity after the VCS
+ * expiration. The hardware will be programmed with an anticipation in order
+ * to take extra computation depending on the current state into account.
+ *
+ * If this is an EIFS, the medium is idle after VCS expiration if it lands in
+ * the same allocation. In other cases, this depends on the current
+ * allocation.
+ */
+void
+ca_access_vcs_restart (ca_t *ctx, u32 start_date, uint length_tck,
+ uint anticipation_tck, bool eifs);
+
+/**
+ * Program hardware ACCESS timer.
+ * \param ctx ca context
+ * \param date expiration date
+ * \param anticipation_tck ACCESS event anticipation
+ *
+ * Program the hardware ACCESS timer, ignoring VCS. This can be used for
+ * responses or bursts where we know we have the medium access.
+ */
+void
+ca_access_program (ca_t *ctx, u32 date, uint anticipation_tck);
+
+/**
+ * Update next ACCESS information for the given grant.
+ * \param ctx ca context
+ * \param mfs granted MFS or NULL for any MFS
+ * \param date grant start date
+ * \param duration_tck grant duration
+ *
+ * Grants are given by CCo with RTS/CTS with immediate grant flag set, or by
+ * bidirectional bursts.
+ *
+ * This function does not program the hardware timer, it only update
+ * information delivered when an ACCESS event occurs.
+ */
+void
+ca_access_grant (ca_t *ctx, mfs_tx_t *mfs, u32 date, uint duration_tck);
+
+/**
+ * Defer ACCESS until end of allocation or new segments to send.
+ * \param ctx ca context
+ * \param date current date
+ * \param anticipation_tck ACCESS event anticipation
+ *
+ * This function should be called when no segment can be sent.
+ */
+void
+ca_access_defer (ca_t *ctx, u32 date, uint anticipation_tck);
+
+/**
+ * Called at the AIFS start in order to set up access for next allocation.
+ * \param ctx ca context
+ * \return pointer to internal new allocation parameters
+ */
+const ca_access_alloc_param_t *
+ca_access_aifs (ca_t *ctx);
+
+/**
+ * Get ACCESS parameters.
+ * \param ctx ca context
+ * \return pointer to internal access parameter structure
+ *
+ * This structure is valid just after the ACCESS event.
+ */
+const ca_access_param_t *
+ca_access_get_param (ca_t *ctx);
+
+/**
+ * Update backoff after a deferral.
+ * \param ctx ca context
+ * \param slot_counter hardware slot counter value at the time of deferral
+ *
+ * If a FC is received after an ACCESS in CSMA, we did not get the medium.
+ * This is a backoff deferral. If we lost the PRP, this is not counted as a
+ * backoff deferral because backoff procedure is not entered.
+ *
+ * If a collision is inferred (no acknowledgement), this is also a backoff
+ * deferral.
+ */
+void
+ca_backoff_deferred (ca_t *ctx, int slot_counter);
+
+/**
+ * Update backoff after a success.
+ * \param ctx ca context
+ *
+ * Will reset the backoff procedure for the next transmission. This is called
+ * when the MPDU has been sent and no collision is inferred.
+ */
+void
+ca_backoff_success (ca_t *ctx);
+
+/**
+ * Cancel a backoff initialisation.
+ * \param ctx ca context
+ *
+ * Called when the backoff parameters were finally not used.
+ */
+void
+ca_backoff_cancel (ca_t *ctx);
+
+/**
+ * Register an MFS with the Channel Access, so it can be sent.
+ * \param ctx ca context
+ * \param mfs the MFS to add
+ */
+void
+ca_mfs_add (ca_t *ctx, mfs_tx_t *mfs);
+
+/**
+ * Unregister an MFS from the Channel Access, so that it can no longer be
+ * sent.
+ * \param ctx ca context
+ * \param mfs the MFS to remove
+ * \return true if successful
+ *
+ * If unsuccessful, the caller should try later because the MFS is currently
+ * been used for a transmission.
+ */
+bool
+ca_mfs_remove (ca_t *ctx, mfs_tx_t *mfs);
+
+/**
+ * Update Channel Access after a MFS update.
+ * \param ctx ca context
+ * \param mfs the updated MFS
+ *
+ * When a MFS is updated, this can change its priority. The Channel Access
+ * layer must update its priority queue and may change the MFS which was
+ * chosen for the next transmission.
+ */
+void
+ca_mfs_update (ca_t *ctx, mfs_tx_t *mfs);
+
+/**
+ * Hold any transmission on this MFS until the next beacon period.
+ * \param ctx ca context
+ * \param mfs the MFS to hold
+ */
+void
+ca_mfs_hold (ca_t *ctx, mfs_tx_t *mfs);
+
+/**
+ * Retrieve a pointer to a schedule table entry.
+ * \param ctx ca context
+ * \param index schedule index in [0..CA_SCHEDULE_NB)
+ * \return the requested schedule
+ *
+ * This should only be done if the schedule is not currently used.
+ */
+ca_schedule_t *
+ca_alloc_get_schedule (ca_t *ctx, uint index);
+
+/**
+ * Update the beacon periods.
+ * \param ctx ca context
+ * \param beacon_periods pointer to the updated beacon periods table
+ * \param beacon_periods_nb number of beacon periods
+ *
+ * To make a smooth update, the currently used beacon periods should not have
+ * been updated (same start time, same schedule index). A non-smooth update
+ * could trigger an extra computation of the next access.
+ */
+void
+ca_alloc_update_beacon_periods (ca_t *ctx,
+ ca_beacon_period_t *beacon_periods,
+ uint beacon_periods_nb);
+
+END_DECLS
+
+#endif /* mac_ca_ca_h */
diff --git a/cesar/mac/ca/inc/access.h b/cesar/mac/ca/inc/access.h
new file mode 100644
index 0000000000..4f5ce3c03f
--- /dev/null
+++ b/cesar/mac/ca/inc/access.h
@@ -0,0 +1,28 @@
+#ifndef mac_ca_inc_access_h
+#define mac_ca_inc_access_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/inc/access.h
+ * \brief ACCESS event and VCS related functions header.
+ * \ingroup mac_ca
+ */
+
+BEGIN_DECLS
+
+/**
+ * Update a previously set up access to match new conditions.
+ * \param ctx ca context
+ * \param date current date
+ */
+void
+ca_access_update (ca_t *ctx, u32 date);
+
+END_DECLS
+
+#endif /* mac_ca_inc_access_h */
diff --git a/cesar/mac/ca/inc/alloc.h b/cesar/mac/ca/inc/alloc.h
new file mode 100644
index 0000000000..47524d248b
--- /dev/null
+++ b/cesar/mac/ca/inc/alloc.h
@@ -0,0 +1,65 @@
+#ifndef mac_ca_inc_alloc_h
+#define mac_ca_inc_alloc_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/inc/alloc.h
+ * \brief Beacon period and schedule related functions header.
+ * \ingroup mac_ca
+ */
+
+/** Get the next beacon period index in the circular buffer. */
+#define CA_ALLOC_NEXT_BEACON_PERIOD(x) (((x) + 1) % CA_BEACON_PERIOD_NB)
+
+/** Is the specified LID a CSMA one? These are the LID used in beacon
+ * entries. */
+#define CA_ALLOC_IS_CSMA(lid) ((lid) == MAC_LID_SHARED_CSMA \
+ || (lid) == MAC_LID_LOCAL_CSMA)
+
+/** Is the specified LID usable for transmission? */
+#define CA_ALLOC_IS_USABLE(lid) ((lid) != MAC_LID_SPC_HOLE \
+ && (lid) != MAC_LID_CFPI)
+
+/** Should transmissions in the specified allocation use hybrid frame
+ * controls according to the specified coexistence mode. An additional test
+ * should be done for proxy beacon MFS. */
+#define CA_ALLOC_IS_HYBRID(coex, lid) \
+ ((coex) == MAC_COEXISTENCE_FULL_HYBRID_MODE \
+ || (coex) == MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE \
+ || ((lid) == MAC_LID_SHARED_CSMA \
+ && (coex) == MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE) \
+ || (lid) == MAC_LID_SPC_HOLE \
+ || (lid) == MAC_LID_SPC_CENTRAL \
+ || (lid) == MAC_LID_DISCOVER)
+
+/**
+ * Find the beacon period index corresponding to the given date.
+ * \param ctx ca context
+ * \param date date to search
+ * \return index in the beacon periods circular buffer or buffer tail if no
+ * beacon period corresponding (no beacon period at all or first beacon period
+ * after the given date)
+ *
+ * Dates have more chance to be at the begin of the circular buffer, in the
+ * current beacon period. Start search at the buffer begin.
+ */
+uint
+ca_alloc_find_beacon_period (const ca_t *ctx, u32 date);
+
+/**
+ * Find the allocation corresponding to the given date offset.
+ * \param sched schedule
+ * \param offset_tck offset to search
+ * \return allocation index, or allocations_nb if not found
+ *
+ * Use dichotomy, no prior assumption about dates repartition.
+ */
+uint
+ca_alloc_find (const ca_schedule_t *sched, uint offset_tck);
+
+#endif /* mac_ca_inc_alloc_h */
diff --git a/cesar/mac/ca/inc/backoff.h b/cesar/mac/ca/inc/backoff.h
new file mode 100644
index 0000000000..7d0b8ab355
--- /dev/null
+++ b/cesar/mac/ca/inc/backoff.h
@@ -0,0 +1,63 @@
+#ifndef mac_ca_inc_backoff_h
+#define mac_ca_inc_backoff_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/inc/backoff.h
+ * \brief Backoff handling private header.
+ * \ingroup mac_ca
+ */
+#include "mac/ca/ca.h"
+
+#include "lib/rnd.h"
+
+/** Channel Access backoff context. */
+struct ca_backoff_t
+{
+ /** Whether backoff is active. */
+ bool active;
+ /** Current backoff procedure counter. */
+ uint bpc;
+ /** Current contention window size. */
+ uint cw;
+ /** Current backoff counter value. */
+ uint bc;
+ /** Current deferral counter. */
+ uint dc;
+ /** Random number generator context. */
+ lib_rnd_t rnd_context;
+};
+typedef struct ca_backoff_t ca_backoff_t;
+
+BEGIN_DECLS
+
+/**
+ * Initialise the backoff structure to a known state.
+ * \param ctx ca context
+ * \param seed 32 bit random seed
+ */
+void
+ca_backoff_init (ca_t *ctx, u32 seed);
+
+/**
+ * Initialise the backoff procedure for a new transmission.
+ * \param ctx ca context
+ * \param cap channel access priority
+ *
+ * Called when a new transmission is planned to start in a CSMA allocation.
+ */
+void
+ca_backoff_new (ca_t *ctx, uint cap);
+
+/* ca_backoff_deferred is public */
+/* ca_backoff_success is public */
+/* ca_backoff_cancel is public */
+
+END_DECLS
+
+#endif /* mac_ca_inc_backoff_h */
diff --git a/cesar/mac/ca/inc/context.h b/cesar/mac/ca/inc/context.h
new file mode 100644
index 0000000000..44f7b0de1a
--- /dev/null
+++ b/cesar/mac/ca/inc/context.h
@@ -0,0 +1,83 @@
+#ifndef mac_ca_inc_context_h
+#define mac_ca_inc_context_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/inc/context.h
+ * \brief Channel Access private context.
+ * \ingroup mac_ca
+ */
+
+#include "mac/ca/ca.h"
+#include "mac/common/mfs.h"
+
+#include "mac/ca/inc/backoff.h"
+#include "mac/ca/inc/trace.h"
+
+/** Channel Access state. */
+enum ca_state_t
+{
+ /** Still not activated, no access will be programmed. */
+ CA_STATE_IDLE,
+ /** Activated, but access programming is held until explicitly
+ * requested. */
+ CA_STATE_HOLD,
+ /** Activated, but no access programmed. */
+ CA_STATE_ACTIVATED,
+ /** TODO: more to come. */
+};
+typedef enum ca_state_t ca_state_t;
+
+/** Channel Access context. */
+struct ca_t
+{
+ /** Current state. */
+ ca_state_t state;
+ /** Phy context. */
+ phy_t *phy;
+ /** Global configuration. */
+ mac_config_t *config;
+ /** MAC STA & MFS store. */
+ mac_store_t *store;
+#if CONFIG_TRACE
+ /** CA trace. */
+ trace_buffer_t trace;
+#endif /* CONFIG_TRACE */
+ /** Next ACCESS event parameters. */
+ ca_access_param_t access_param;
+ /** Start date of the current VCS. */
+ u32 vcs_start_date;
+ /** Length of the current VCS. */
+ uint vcs_length_tck;
+ /** Is the current VCS an EIFS? */
+ bool vcs_eifs;
+ /** Programmed anticipation, may be needed if the timer must be
+ * reprogrammed. */
+ uint anticipation_tck;
+ /** Backoff context. */
+ ca_backoff_t backoff;
+ /** Beacon period circular buffer head and tail. */
+ uint beacon_periods_head, beacon_periods_tail;
+ /** Beacon period circular buffer. */
+ ca_beacon_period_t beacon_periods[CA_BEACON_PERIOD_NB];
+ /** Schedules table. */
+ ca_schedule_t schedules[CA_SCHEDULE_NB];
+ /** Current beacon period index in buffer, or tail if not set. */
+ uint current_beacon_period;
+ /** Current allocation index in current schedule. */
+ uint current_allocation_index;
+ /** Current allocation parameters. */
+ ca_access_alloc_param_t current_allocation_param;
+ /** Priority sorted MFS heap. */
+ heap_t mfs_heap;
+ /** List of MFS held until the next beacon period. */
+ list_t held;
+};
+/* Forward declaration in mac/ca/ca.h. */
+
+#endif /* mac_ca_inc_context_h */
diff --git a/cesar/mac/ca/inc/mfs.h b/cesar/mac/ca/inc/mfs.h
new file mode 100644
index 0000000000..6354505429
--- /dev/null
+++ b/cesar/mac/ca/inc/mfs.h
@@ -0,0 +1,27 @@
+#ifndef mac_ca_inc_mfs_h
+#define mac_ca_inc_mfs_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/inc/mfs.h
+ * \brief MFS related functions header.
+ * \ingroup mac_ca
+ */
+
+BEGIN_DECLS
+
+/**
+ * Prepare for the next beacon period.
+ * \param ctx ca context
+ */
+void
+ca_mfs_next_beacon_period (ca_t *ctx);
+
+END_DECLS
+
+#endif /* mac_ca_inc_mfs_h */
diff --git a/cesar/mac/ca/inc/trace.h b/cesar/mac/ca/inc/trace.h
new file mode 100644
index 0000000000..d8fe2b5a23
--- /dev/null
+++ b/cesar/mac/ca/inc/trace.h
@@ -0,0 +1,77 @@
+#ifndef mac_ca_inc_trace_h
+#define mac_ca_inc_trace_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/inc/trace.h
+ * \brief Define Channel Access trace events.
+ * \ingroup mac_ca
+ */
+#include "lib/trace.h"
+
+/** Shortcut for tracing inside CA. */
+#define CA_TRACE(id, args...) \
+ TRACE_FAST_SHORT (CA_TRACE_, &ctx->trace, id, ## args)
+
+#if CONFIG_TRACE
+
+enum
+{
+ CA_TRACE_INIT,
+ CA_TRACE_UNINIT,
+ CA_TRACE_MFS_ADD,
+ CA_TRACE_MFS_REMOVE,
+ CA_TRACE_MFS_UPDATE,
+ CA_TRACE_MFS_HOLD,
+
+ CA_TRACE_BACKOFF_INIT,
+ CA_TRACE_BACKOFF_NEW,
+ CA_TRACE_BACKOFF_DEFERRED,
+ CA_TRACE_BACKOFF_ACCESS,
+ CA_TRACE_BACKOFF_SUCCESS,
+ CA_TRACE_BACKOFF_CANCEL,
+
+ CA_TRACE_ALLOC_UPDATE_BEACON_PERIODS,
+
+ CA_TRACE_ACCESS_ACTIVATE,
+ CA_TRACE_ACCESS_DEACTIVATE,
+ CA_TRACE_ACCESS_VCS_RESTART,
+ CA_TRACE_ACCESS_VCS_RESTART_MFS,
+ CA_TRACE_ACCESS_VCS_RESTART_AIFS,
+ CA_TRACE_ACCESS_PROGRAM,
+ CA_TRACE_ACCESS_GRANT,
+ CA_TRACE_ACCESS_DEFER,
+ CA_TRACE_ACCESS_AIFS,
+};
+
+BEGIN_DECLS
+
+/**
+ * Initialise trace buffer.
+ * \param ctx ca context
+ */
+void
+ca_trace_init (ca_t *ctx);
+
+/**
+ * Uninitialise trace buffer.
+ * \param ctx ca context
+ */
+void
+ca_trace_uninit (ca_t *ctx);
+
+END_DECLS
+
+#else /* !CONFIG_TRACE */
+
+# define ca_trace_init(ctx) ((void) 0)
+# define ca_trace_uninit(ctx) ((void) 0)
+
+#endif /* !CONFIG_TRACE */
+
+#endif /* mac_ca_inc_trace_h */
diff --git a/cesar/mac/ca/mfs.h b/cesar/mac/ca/mfs.h
new file mode 100644
index 0000000000..3e4d8f4111
--- /dev/null
+++ b/cesar/mac/ca/mfs.h
@@ -0,0 +1,26 @@
+#ifndef mac_ca_mfs_h
+#define mac_ca_mfs_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/mfs.h
+ * \brief Channel Access related definitions for MFS TX.
+ * \ingroup mac_ca
+ */
+
+/** MFS state according to Channel Access. */
+enum ca_mfs_state_t
+{
+ CA_MFS_STATE_UNKNOWN, /**< MFS unknown to CA or containing no PB. */
+ CA_MFS_STATE_PRIO_QUEUED, /**< MFS queued to CSMA CA MFS heap. */
+ CA_MFS_STATE_CFP_QUEUED, /**< MFS available for TDMA. */
+ CA_MFS_STATE_HELD, /**< MFS held until next beacon period. */
+};
+typedef enum ca_mfs_state_t ca_mfs_state_t;
+
+#endif /* mac_ca_mfs_h */
diff --git a/cesar/mac/ca/src/access.c b/cesar/mac/ca/src/access.c
new file mode 100644
index 0000000000..a312f047f1
--- /dev/null
+++ b/cesar/mac/ca/src/access.c
@@ -0,0 +1,444 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/src/access.c
+ * \brief ACCESS event and VCS related functions.
+ * \ingroup mac_ca
+ */
+#include "common/std.h"
+
+#include "mac/common/timings.h"
+#include "mac/ca/ca.h"
+
+#include "hal/phy/phy.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/alloc.h"
+#include "mac/ca/inc/mfs.h"
+
+const ca_access_alloc_param_t *
+ca_access_activate (ca_t *ctx, u32 date, uint anticipation_tck)
+{
+ dbg_assert (ctx && ctx->state == CA_STATE_IDLE);
+ CA_TRACE (ACCESS_ACTIVATE, date);
+ /* Change to ACTIVATED state. */
+ ctx->state = CA_STATE_ACTIVATED;
+ /* Find current allocation. */
+ uint bp_i = ca_alloc_find_beacon_period (ctx, date);
+ /* The beacon period must be found or we have a real time problem not
+ * handled for the moment. */
+ dbg_assert (bp_i != ctx->beacon_periods_tail);
+ ca_beacon_period_t *bp = &ctx->beacon_periods[bp_i];
+ ca_schedule_t *sched = &ctx->schedules[bp->schedule_index];
+ uint alloc_i = ca_alloc_find (sched, date - bp->start_date);
+ ctx->current_beacon_period = bp_i;
+ ctx->current_allocation_index = alloc_i;
+ /* Reschedule: restart VCS for one EIFS (hybrid or AV length). */
+ bool hybrid = CA_ALLOC_IS_HYBRID (
+ sched->coexistence_mode,
+ sched->allocations[ctx->current_allocation_index].glid);
+ ca_access_vcs_restart (ctx, date, hybrid ? MAC_EIFS_10_TCK
+ : MAC_EIFS_AV_TCK, anticipation_tck, true);
+ /* Return current allocation parameters. */
+ ctx->current_allocation_param.coexistence_mode = sched->coexistence_mode;
+ ctx->current_allocation_param.hybrid = hybrid;
+ ctx->current_allocation_param.nek_switch = sched->nek_switch;
+ return &ctx->current_allocation_param;
+}
+
+void
+ca_access_deactivate (ca_t *ctx)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ CA_TRACE (ACCESS_DEACTIVATE);
+ /* Cancel any programmed hardware ACCESS timer. */
+ phy_access_timer_cancel (ctx->phy);
+ /* Change to IDLE state. */
+ ctx->state = CA_STATE_IDLE;
+}
+
+void
+ca_access_hold (ca_t *ctx)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ /* Change to HOLD state: access updates are ignored until reprogrammed. */
+ ctx->state = CA_STATE_HOLD;
+}
+
+/**
+ * Choose a MFS TX for the given GLID.
+ * \param ctx ca context
+ * \param glid global link identifier
+ * \return MFS or NULL if none match
+ */
+static mfs_tx_t *
+ca_access_choose_mfs_tx (ca_t *ctx, uint glid);
+
+/**
+ * Prepare current allocation for access updates.
+ * \param ctx ca context
+ * \param start_date start date within the requested allocation
+ * \param sched_p current schedule
+ * \param allocation_end_date_p current allocation end date
+ * \param glid_p current global link identifier
+ */
+static void
+ca_access_prepare_alloc (ca_t *ctx, u32 start_date, ca_schedule_t **sched_p,
+ u32 *allocation_end_date_p, uint *glid_p)
+{
+ uint bp_i;
+ ca_beacon_period_t *bp;
+ ca_schedule_t *sched;
+ uint alloc_i;
+ uint aifs_tck = MAC_AIFS_TCK;
+ dbg_assert (ctx);
+ dbg_assert (sched_p && allocation_end_date_p && glid_p);
+ dbg_assert (ctx->current_beacon_period != ctx->beacon_periods_tail);
+ /* Use the current schedule. */
+ bp_i = ctx->current_beacon_period;
+ bp = &ctx->beacon_periods[bp_i];
+ sched = &ctx->schedules[bp->schedule_index];
+ alloc_i = ctx->current_allocation_index;
+ /* Test for beacon period overflow.
+ * If the current allocation extends past the next beacon period start
+ * date, it must be shrunk to avoid overlapping. */
+ u32 allocation_end_date = bp->start_date
+ + sched->allocations[alloc_i].end_offset_tck;
+ uint next_bp_i = CA_ALLOC_NEXT_BEACON_PERIOD (bp_i);
+ if (next_bp_i != ctx->beacon_periods_tail
+ && less_mod2p32 (ctx->beacon_periods[next_bp_i].start_date,
+ allocation_end_date))
+ {
+ allocation_end_date = ctx->beacon_periods[next_bp_i].start_date;
+ /* There should be a B2BIFS between the last allocation and an
+ * eventual beacon allocation at the start of the next beacon period. */
+ ca_schedule_t *next_sched =
+ &ctx->schedules[ctx->beacon_periods[next_bp_i].schedule_index];
+ dbg_assert (next_sched->allocations_nb > 0);
+ if (next_sched->allocations[0].glid == MAC_LID_SPC_CENTRAL)
+ aifs_tck = MAC_B2BIFS_TCK;
+ }
+ /* Reduce the allocation by the AIFS. */
+ allocation_end_date -= aifs_tck;
+ /* Return the requested information. */
+ *sched_p = sched;
+ *allocation_end_date_p = allocation_end_date;
+ *glid_p = sched->allocations[alloc_i].glid;
+}
+
+/**
+ * Setup an AIFS ACCESS for the next allocation.
+ * \param ctx ca context
+ * \param allocation_end_date allocation end date (without AIFS)
+ */
+static void
+ca_access_goto_aifs (ca_t *ctx, u32 allocation_end_date)
+{
+ uint bp_i;
+ ca_beacon_period_t *bp;
+ ca_schedule_t *sched;
+ dbg_assert (ctx);
+ /* Get current schedule. */
+ bp_i = ctx->current_beacon_period;
+ dbg_assert (bp_i != ctx->beacon_periods_tail);
+ bp = &ctx->beacon_periods[bp_i];
+ sched = &ctx->schedules[bp->schedule_index];
+ /* AIFS ACCESS, no MFS. */
+ ctx->access_param.mfs = NULL;
+ if (ctx->current_allocation_index + 1 == sched->allocations_nb)
+ {
+ /* Last allocation, go on with the next beacon period (which MUST
+ * exist). */
+ uint next_bp_i = CA_ALLOC_NEXT_BEACON_PERIOD (bp_i);
+ dbg_assert (next_bp_i != ctx->beacon_periods_tail);
+ /* Note that we do not care if this is an AIFS or a B2BIFS. */
+ ctx->access_param.access_date =
+ ctx->beacon_periods[next_bp_i].start_date - MAC_AIFS_TCK;
+ }
+ else
+ {
+ /* Next allocation. */
+ ctx->access_param.access_date = allocation_end_date;
+ }
+ /* No anticipation for AIFS: pre-add what the caller subtracts later. */
+ ctx->access_param.access_date += ctx->anticipation_tck;
+}
+
+void
+ca_access_vcs_restart (ca_t *ctx, u32 start_date, uint length_tck,
+ uint anticipation_tck, bool eifs)
+{
+ ca_schedule_t *sched;
+ u32 allocation_end_date;
+ uint glid;
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ CA_TRACE (ACCESS_VCS_RESTART, start_date, length_tck, anticipation_tck,
+ eifs);
+ /* Prepare allocation. */
+ ca_access_prepare_alloc (ctx, start_date, &sched, &allocation_end_date,
+ &glid);
+ /* Test for allocation overflow or unusable allocation. */
+ if (!less_mod2p32 (start_date + length_tck, allocation_end_date)
+ || !CA_ALLOC_IS_USABLE (glid))
+ {
+ ca_access_goto_aifs (ctx, allocation_end_date);
+ /* Here, no hope for any frame reception, do not even activate RX. */
+ ctx->access_param.cw_start_date = allocation_end_date;
+ }
+ /* No overflow, ACCESS event. */
+ else
+ {
+ u32 access_date = start_date + length_tck;
+ bool csma = CA_ALLOC_IS_CSMA (glid);
+ uint cap = 0;
+ /* PRP? Add the two priority resolution slots. */
+ if (csma && !eifs)
+ access_date += 2 * MAC_SLOT_TCK;
+ ctx->access_param.cw_start_date = access_date;
+ /* Find a suitable MFS. */
+ mfs_tx_t *mfs = ca_access_choose_mfs_tx (ctx, glid);
+ if (mfs && mfs->ca_state != CA_MFS_STATE_UNKNOWN)
+ {
+ bool hybrid = CA_ALLOC_IS_HYBRID (sched->coexistence_mode, glid)
+ || mfs->beacon;
+ cap = mfs->cap;
+ if (csma)
+ {
+ ca_backoff_new (ctx, cap);
+ access_date += ctx->backoff.bc * MAC_SLOT_TCK;
+ }
+ /* Test again for allocation overflow after backoff update. */
+ if (!less_mod2p32 (access_date, allocation_end_date))
+ {
+ ca_access_goto_aifs (ctx, allocation_end_date);
+ }
+ else
+ {
+ /* Set Access parameters. */
+ ca_beacon_period_t *bp =
+ &ctx->beacon_periods[ctx->current_beacon_period];
+ ctx->access_param.mfs = mfs;
+ ctx->access_param.access_date = access_date;
+ ctx->access_param.beacon_period_start_date = bp->start_date;
+ ctx->access_param.duration_tck =
+ allocation_end_date - access_date;
+ ctx->access_param.cfp = !csma;
+ ctx->access_param.hybrid = hybrid;
+ }
+ }
+ else
+ {
+ ca_access_goto_aifs (ctx, allocation_end_date);
+ }
+ /* Reactivate RX or start PRP. */
+ if (!eifs)
+ {
+ if (csma)
+ {
+ phy_access_backoff_start (ctx->phy, start_date + length_tck,
+ cap);
+ }
+ else
+ {
+ phy_rx_activate (ctx->phy, false,
+ ctx->access_param.cw_start_date, true);
+ }
+ }
+ }
+ /* Setup ACCESS for this allocation. */
+ phy_access_timer_program (ctx->phy, ctx->access_param.access_date
+ - anticipation_tck);
+ /* Record result for debug. */
+ if (ctx->access_param.mfs)
+ CA_TRACE (ACCESS_VCS_RESTART_MFS, ctx->access_param.mfs,
+ ctx->access_param.access_date,
+ ctx->access_param.beacon_period_start_date,
+ ctx->access_param.duration_tck, ctx->access_param.cfp,
+ ctx->access_param.hybrid);
+ else
+ CA_TRACE (ACCESS_VCS_RESTART_AIFS, ctx->access_param.access_date);
+ /* Record parameters in case the VCS needs to be reprogrammed. */
+ ctx->vcs_start_date = start_date;
+ ctx->vcs_length_tck = length_tck;
+ ctx->vcs_eifs = eifs;
+ ctx->anticipation_tck = anticipation_tck;
+ /* Change state. */
+ ctx->state = CA_STATE_ACTIVATED;
+}
+
+void
+ca_access_program (ca_t *ctx, u32 date, uint anticipation_tck)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ CA_TRACE (ACCESS_PROGRAM, date, anticipation_tck);
+ phy_access_timer_program (ctx->phy, date - anticipation_tck);
+ ctx->access_param.mfs = NULL;
+ /* Change state, should not be updated. */
+ ctx->state = CA_STATE_HOLD;
+}
+
+void
+ca_access_grant (ca_t *ctx, mfs_tx_t *mfs, u32 date, uint duration_tck)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ CA_TRACE (ACCESS_GRANT, mfs, date, duration_tck);
+ if (!mfs)
+ {
+ dbg_assert (!heap_empty (&ctx->mfs_heap)); /* TODO */
+ /* Choose an MFS. */
+ mfs = PARENT_OF (mfs_tx_t, ca_prio_link, heap_get_root
+ (&ctx->mfs_heap));
+ dbg_assert (mfs->ca_state == CA_MFS_STATE_PRIO_QUEUED);
+ }
+ else
+ {
+ /* Use the given one. */
+ }
+ ctx->access_param.mfs = mfs;
+ ctx->access_param.access_date = date;
+ ctx->access_param.duration_tck = duration_tck;
+ ctx->access_param.cfp = true;
+ ctx->access_param.hybrid = false; /* TODO */
+ /* Change state, should not be updated. */
+ ctx->state = CA_STATE_HOLD;
+}
+
+void
+ca_access_defer (ca_t *ctx, u32 date, uint anticipation_tck)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ CA_TRACE (ACCESS_DEFER, date, anticipation_tck);
+ ca_schedule_t *sched;
+ u32 allocation_end_date;
+ uint glid;
+ /* Prepare allocation. */
+ ca_access_prepare_alloc (ctx, date, &sched, &allocation_end_date, &glid);
+ /* Go directly to AIFS. */
+ ca_access_goto_aifs (ctx, allocation_end_date);
+ /* Setup ACCESS for this allocation. */
+ phy_access_timer_program (ctx->phy, ctx->access_param.access_date
+ - anticipation_tck);
+ /* Record parameters if VCS need to be reprogrammed. */
+ ctx->anticipation_tck = anticipation_tck;
+ /* Change state. */
+ ctx->state = CA_STATE_ACTIVATED;
+}
+
+const ca_access_alloc_param_t *
+ca_access_aifs (ca_t *ctx)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ dbg_assert (ctx->current_beacon_period != ctx->beacon_periods_tail);
+ CA_TRACE (ACCESS_AIFS);
+ /* Go on with the next allocation, first get current one. */
+ uint bp_i = ctx->current_beacon_period;
+ ca_beacon_period_t *bp = &ctx->beacon_periods[bp_i];
+ ca_schedule_t *sched = &ctx->schedules[bp->schedule_index];
+ uint alloc_i = ctx->current_allocation_index;
+ u32 access_date;
+ /* Last allocation? */
+ if (alloc_i + 1 == sched->allocations_nb)
+ {
+ bp_i = CA_ALLOC_NEXT_BEACON_PERIOD (bp_i);
+ dbg_assert (bp_i != ctx->beacon_periods_tail);
+ alloc_i = 0;
+ access_date = ctx->beacon_periods[bp_i].start_date;
+ /* Prepare next beacon period. */
+ ca_mfs_next_beacon_period (ctx);
+ }
+ else
+ {
+ access_date = bp->start_date
+ + sched->allocations[alloc_i].end_offset_tck;
+ alloc_i++;
+ }
+ ctx->current_beacon_period = bp_i;
+ ctx->current_allocation_index = alloc_i;
+ /* Reschedule. */
+ ca_access_vcs_restart (ctx, access_date, 0, ctx->anticipation_tck, false);
+ /* Return current allocation parameters. */
+ ctx->current_allocation_param.coexistence_mode = sched->coexistence_mode;
+ ctx->current_allocation_param.hybrid = CA_ALLOC_IS_HYBRID (
+ sched->coexistence_mode,
+ sched->allocations[ctx->current_allocation_index].glid);
+ ctx->current_allocation_param.nek_switch = sched->nek_switch;
+ return &ctx->current_allocation_param;
+}
+
+const ca_access_param_t *
+ca_access_get_param (ca_t *ctx)
+{
+ dbg_assert (ctx && ctx->state != CA_STATE_IDLE);
+ return &ctx->access_param;
+}
+
+void
+ca_access_update (ca_t *ctx, u32 date)
+{
+ dbg_assert (ctx);
+ if (ctx->state >= CA_STATE_ACTIVATED)
+ {
+ if (lesseq_mod2p32 (ctx->vcs_start_date + ctx->vcs_length_tck, date))
+ {
+ ca_access_vcs_restart (ctx, date, 0, ctx->anticipation_tck,
+ false);
+ }
+ else if (less_mod2p32 (date, ctx->vcs_start_date))
+ {
+ ca_access_vcs_restart (ctx, ctx->vcs_start_date,
+ ctx->vcs_length_tck, ctx->anticipation_tck,
+ ctx->vcs_eifs);
+ }
+ else
+ {
+ ca_access_vcs_restart (ctx, date, ctx->vcs_length_tck
+ - (date - ctx->vcs_start_date),
+ ctx->anticipation_tck, ctx->vcs_eifs);
+ }
+ }
+}
+
+static mfs_tx_t *
+ca_access_choose_mfs_tx (ca_t *ctx, uint glid)
+{
+ mfs_tx_t *mfs;
+ dbg_assert (ctx);
+ dbg_assert (glid >= MAC_GLID_MIN);
+    /* If GLID, this is a CFP allocation, else choose the MFS with the
+     * greatest priority. */
+ if (!CA_ALLOC_IS_CSMA (glid))
+ {
+ dbg_assert (glid <= MAC_GLID_MAX
+ || glid == MAC_LID_SPC_CENTRAL
+ || glid == MAC_LID_DISCOVER);
+ mfs = mac_store_mfs_get_tx (ctx->store, false, false, glid, 0);
+ dbg_assert (!mfs || mfs->ca_state == CA_MFS_STATE_UNKNOWN
+ || mfs->ca_state == CA_MFS_STATE_CFP_QUEUED
+ || mfs->ca_state == CA_MFS_STATE_HELD);
+ /* Reference is borrowed from the store. */
+ if (mfs)
+ blk_release (mfs);
+ if (mfs && mfs->ca_state == CA_MFS_STATE_CFP_QUEUED)
+ return mfs;
+ else
+ return NULL;
+ }
+ else
+ {
+ if (!heap_empty (&ctx->mfs_heap))
+ {
+ mfs = PARENT_OF (mfs_tx_t, ca_prio_link,
+ heap_get_root (&ctx->mfs_heap));
+ dbg_assert (mfs->ca_state == CA_MFS_STATE_PRIO_QUEUED);
+ return mfs;
+ }
+ else
+ return NULL;
+ }
+}
+
diff --git a/cesar/mac/ca/src/alloc.c b/cesar/mac/ca/src/alloc.c
new file mode 100644
index 0000000000..0206100a57
--- /dev/null
+++ b/cesar/mac/ca/src/alloc.c
@@ -0,0 +1,139 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/src/alloc.c
+ * \brief Beacon period and schedule related functions.
+ * \ingroup mac_ca
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/alloc.h"
+#include "mac/ca/inc/access.h"
+#include "hal/phy/phy.h"
+
+uint
+ca_alloc_find_beacon_period (const ca_t *ctx, u32 date)
+{
+ uint pi, i;
+ dbg_assert (ctx);
+ pi = ctx->beacon_periods_tail;
+ for (i = ctx->beacon_periods_head;
+ i != ctx->beacon_periods_tail
+ && lesseq_mod2p32 (ctx->beacon_periods[i].start_date, date);
+ pi = i, i = CA_ALLOC_NEXT_BEACON_PERIOD (i))
+ ;
+ return pi;
+}
+
+uint
+ca_alloc_find (const ca_schedule_t *sched, uint offset_tck)
+{
+ uint a, b, m;
+ /* Dichotomy search. */
+ a = 0;
+ b = sched->allocations_nb;
+ while (a != b)
+ {
+ m = (a + b) / 2;
+ if (offset_tck < sched->allocations[m].end_offset_tck)
+ b = m;
+ else
+ a = m + 1;
+ }
+ return a;
+}
+
+ca_schedule_t *
+ca_alloc_get_schedule (ca_t *ctx, uint index)
+{
+ uint i;
+ dbg_assert (ctx);
+ /* Check this schedule is not used. */
+ for (i = ctx->beacon_periods_head;
+ i != ctx->beacon_periods_tail;
+ i = CA_ALLOC_NEXT_BEACON_PERIOD (i))
+ dbg_assert (ctx->beacon_periods[i].schedule_index != index);
+ return &ctx->schedules[index];
+}
+
+void
+ca_alloc_update_beacon_periods (ca_t *ctx,
+ ca_beacon_period_t *beacon_periods,
+ uint beacon_periods_nb)
+{
+ u32 now;
+ uint i, j;
+ bool non_smooth = false;
+ bool current_at_tail;
+ dbg_assert (ctx);
+ dbg_assert (beacon_periods);
+ dbg_assert (beacon_periods_nb > 0
+ && beacon_periods_nb < CA_BEACON_PERIOD_NB - 1);
+ CA_TRACE (ALLOC_UPDATE_BEACON_PERIODS);
+ /* Eliminate expired beacon period from context. */
+ for (i = ctx->beacon_periods_head;
+ i != ctx->beacon_periods_tail
+ && less_mod2p32 (ctx->beacon_periods[i].start_date,
+ beacon_periods[0].start_date);
+ i = CA_ALLOC_NEXT_BEACON_PERIOD (i))
+ ;
+ ctx->beacon_periods_head = i;
+ current_at_tail = ctx->current_beacon_period == ctx->beacon_periods_tail;
+ /* First beacon period provided should be the current one (actually, it
+ * might be the second one...). */
+ now = phy_date (ctx->phy);
+ dbg_assert (lesseq_mod2p32 (beacon_periods[0].start_date, now)
+ && (beacon_periods_nb < 2
+ || less_mod2p32 (now, beacon_periods[1].start_date)));
+ /* Update the first and second period.
+ * This step aims at checking whether the current scheduled access should
+ * be updated. TODO It should be reworked. For example, a better
+ * approach could process as a regular update to work out if the same
+ * results are obtained. I suspect the current approach to give both
+ * false positives and negatives, to not work anymore as soon as NTB plays
+ * against us, or actually, to not work at all... */
+ for (j = 0, i = ctx->beacon_periods_head;
+ j < 2 && j < beacon_periods_nb && i != ctx->beacon_periods_tail;
+ j++, i = CA_ALLOC_NEXT_BEACON_PERIOD (i))
+ {
+ if (ctx->beacon_periods[i].start_date != beacon_periods[j].start_date
+ || (ctx->beacon_periods[i].schedule_index
+ != beacon_periods[j].schedule_index))
+ {
+ /* Non-smooth update. */
+ non_smooth = true;
+ break;
+ }
+ }
+ /* Update other periods. */
+ for (; j < beacon_periods_nb; j++, i = CA_ALLOC_NEXT_BEACON_PERIOD (i))
+ {
+ ctx->beacon_periods[i].start_date = beacon_periods[j].start_date;
+ ctx->beacon_periods[i].schedule_index =
+ beacon_periods[j].schedule_index;
+ /* Add some consistency checks for distracted coders. */
+ ca_schedule_t *sched =
+ &ctx->schedules[beacon_periods[j].schedule_index];
+ dbg_assert (sched->coexistence_mode < MAC_COEXISTENCE_NB
+ && sched->nek_switch < 2
+ && sched->allocations_nb > 0
+ && sched->allocations[0].end_offset_tck > 0);
+ }
+ ctx->beacon_periods_tail = i;
+ dbg_assert ((ctx->beacon_periods_tail - ctx->beacon_periods_head +
+ CA_BEACON_PERIOD_NB) % CA_BEACON_PERIOD_NB ==
+ beacon_periods_nb);
+ /* Fix current beacon period index. */
+ if (current_at_tail)
+ ctx->current_beacon_period = ctx->beacon_periods_tail;
+ /* TODO Handle non-smooth updates. */
+ if (non_smooth)
+ ca_access_update (ctx, phy_date (ctx->phy));
+}
+
diff --git a/cesar/mac/ca/src/backoff.c b/cesar/mac/ca/src/backoff.c
new file mode 100644
index 0000000000..7e3b729408
--- /dev/null
+++ b/cesar/mac/ca/src/backoff.c
@@ -0,0 +1,110 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/src/backoff.c
+ * \brief Backoff handling.
+ * \ingroup mac_ca
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+
+#define CA_BACKOFF_RND_INIT 0x42421664
+
+void
+ca_backoff_init (ca_t *ctx, u32 seed)
+{
+ dbg_assert (ctx);
+ ctx->backoff.active = false;
+ ctx->backoff.bpc = 0;
+ ctx->backoff.cw = 0;
+ ctx->backoff.bc = 0;
+ ctx->backoff.dc = 0;
+ lib_rnd_init (&ctx->backoff.rnd_context, CA_BACKOFF_RND_INIT ^ seed);
+ CA_TRACE (BACKOFF_INIT);
+}
+
+void
+ca_backoff_new (ca_t *ctx, uint cap)
+{
+ static const uint dc_by_pbc[4] = { 0, 1, 3, 15 };
+ static const uint cw_by_capmsb_by_pbc[2][4] = {
+ { 7, 15, 31, 63 }, { 7, 15, 15, 31 }
+ };
+ dbg_assert (ctx);
+ dbg_assert (cap < 4);
+ if (ctx->backoff.active)
+ ca_backoff_cancel (ctx);
+ if (ctx->backoff.bpc == 0 || ctx->backoff.bc == 0 || ctx->backoff.dc == 0)
+ {
+ uint bpcm = MIN (3u, ctx->backoff.bpc);
+ ctx->backoff.dc = dc_by_pbc[bpcm];
+ ctx->backoff.cw = cw_by_capmsb_by_pbc[cap >> 1][bpcm];
+ ctx->backoff.bpc++;
+ ctx->backoff.bc = lib_rnd32 (&ctx->backoff.rnd_context)
+ & ctx->backoff.cw;
+ }
+ else
+ {
+ uint bpcm = MIN (3u, ctx->backoff.bpc - 1);
+ uint cw = cw_by_capmsb_by_pbc[cap >> 1][bpcm];
+ if (cw != ctx->backoff.cw)
+ {
+ /* Handle CAP change. */
+ ctx->backoff.cw = cw;
+ ctx->backoff.bc = lib_rnd32 (&ctx->backoff.rnd_context)
+ & ctx->backoff.cw;
+ }
+ else
+ {
+ ctx->backoff.bc--;
+ }
+ ctx->backoff.dc--;
+ }
+ ctx->backoff.active = true;
+ CA_TRACE (BACKOFF_NEW, ctx->backoff.bpc, ctx->backoff.cw, ctx->backoff.bc,
+ ctx->backoff.dc);
+}
+
+void
+ca_backoff_deferred (ca_t *ctx, int slot_counter)
+{
+ dbg_assert (ctx);
+ if (ctx->backoff.active)
+ {
+ if (slot_counter > (int) ctx->backoff.bc)
+ ctx->backoff.bc = 0;
+ else if (slot_counter > 0)
+ ctx->backoff.bc -= slot_counter;
+ ctx->backoff.active = false;
+ CA_TRACE (BACKOFF_DEFERRED, slot_counter);
+ }
+}
+
+void
+ca_backoff_success (ca_t *ctx)
+{
+ dbg_assert (ctx);
+ ctx->backoff.bpc = 0;
+ ctx->backoff.active = false;
+ CA_TRACE (BACKOFF_SUCCESS);
+}
+
+void
+ca_backoff_cancel (ca_t *ctx)
+{
+ dbg_assert (ctx);
+ if (ctx->backoff.active)
+ {
+ ctx->backoff.bc++;
+ ctx->backoff.dc++;
+ ctx->backoff.active = false;
+ CA_TRACE (BACKOFF_CANCEL);
+ }
+}
+
diff --git a/cesar/mac/ca/src/ca.c b/cesar/mac/ca/src/ca.c
new file mode 100644
index 0000000000..97f35cc59e
--- /dev/null
+++ b/cesar/mac/ca/src/ca.c
@@ -0,0 +1,190 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/src/ca.c
+ * \brief Channel Access main implementation file.
+ * \ingroup mac_ca
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/access.h"
+#include "mac/common/timings.h"
+
+#include "hal/phy/phy.h"
+
+ca_t ca_global;
+
+/**
+ * MFS priority comparison function.
+ * \param left left hand MFS
+ * \param right right hand MFS
+ * \return true iff left has a greater priority than right
+ */
+static bool
+ca_mfs_less (heap_node_t *left, heap_node_t *right);
+
+ca_t *
+ca_init (phy_t *phy, mac_config_t *config, mac_store_t *store)
+{
+ dbg_assert (phy);
+ dbg_assert (config);
+ dbg_assert (store);
+ ca_t *ctx = &ca_global;
+ ctx->state = CA_STATE_IDLE;
+ ctx->phy = phy;
+ ctx->config = config;
+ ctx->store = store;
+ ca_trace_init (ctx);
+ ctx->access_param.mfs = NULL;
+ ctx->access_param.access_date = 0;
+ ctx->access_param.duration_tck = 0;
+ ctx->access_param.cfp = false;
+ ctx->access_param.hybrid = false;
+ ctx->vcs_start_date = 0;
+ ctx->vcs_length_tck = 0;
+ ctx->vcs_eifs = false;
+ ctx->anticipation_tck = 0;
+ ca_backoff_init (ctx, config->seed);
+ ctx->beacon_periods_head = ctx->beacon_periods_tail = 0;
+ ctx->current_beacon_period = 0;
+ ctx->current_allocation_index = 0;
+ heap_init (&ctx->mfs_heap, ca_mfs_less);
+ list_init (&ctx->held);
+ CA_TRACE (INIT);
+ return ctx;
+}
+
+void
+ca_uninit (ca_t *ctx)
+{
+ dbg_assert (ctx && ctx->state == CA_STATE_IDLE);
+ dbg_assert (heap_empty (&ctx->mfs_heap));
+ CA_TRACE (UNINIT);
+ ca_trace_uninit (ctx);
+}
+
+void
+ca_mfs_add (ca_t *ctx, mfs_tx_t *mfs)
+{
+ dbg_assert (ctx);
+ dbg_assert (mfs);
+ CA_TRACE (MFS_ADD, mfs);
+ ca_mfs_update (ctx, mfs);
+}
+
+bool
+ca_mfs_remove (ca_t *ctx, mfs_tx_t *mfs)
+{
+ dbg_assert (ctx);
+ dbg_assert (mfs);
+ CA_TRACE (MFS_REMOVE, mfs);
+ switch (mfs->ca_state)
+ {
+ case CA_MFS_STATE_PRIO_QUEUED:
+ heap_remove (&ctx->mfs_heap, &mfs->ca_prio_link);
+ break;
+ case CA_MFS_STATE_HELD:
+ list_remove (&ctx->held, &mfs->ca_held_link);
+ break;
+ default:
+ ;
+ }
+ mfs->ca_state = CA_MFS_STATE_UNKNOWN;
+ /* The current ACCESS may have changed. */
+ ca_access_update (ctx, phy_date (ctx->phy));
+ return true;
+}
+
+void
+ca_mfs_update (ca_t *ctx, mfs_tx_t *mfs)
+{
+ ca_mfs_state_t new_state;
+ dbg_assert (ctx);
+ dbg_assert_ptr (mfs);
+ CA_TRACE (MFS_UPDATE, mfs);
+ /* Ignore held MFS. */
+ if (mfs->ca_state == CA_MFS_STATE_HELD)
+ return;
+ /* Compute the new MFS state... */
+ if (mfs->seg_nb + mfs->pending_seg_nb != 0)
+ {
+ if (mfs->cfp)
+ new_state = CA_MFS_STATE_CFP_QUEUED;
+ else
+ new_state = CA_MFS_STATE_PRIO_QUEUED;
+ }
+ else
+ new_state = CA_MFS_STATE_UNKNOWN;
+ /* ...and execute the corresponding transition. */
+ if (mfs->ca_state == CA_MFS_STATE_PRIO_QUEUED)
+ {
+ if (new_state == CA_MFS_STATE_PRIO_QUEUED)
+ heap_adjust (&ctx->mfs_heap, &mfs->ca_prio_link);
+ else
+ heap_remove (&ctx->mfs_heap, &mfs->ca_prio_link);
+ }
+ else if (new_state == CA_MFS_STATE_PRIO_QUEUED)
+ heap_insert (&ctx->mfs_heap, &mfs->ca_prio_link);
+ /* Done. */
+ mfs->ca_state = new_state;
+ /* The current ACCESS may have changed. */
+ ca_access_update (ctx, phy_date (ctx->phy));
+}
+
+void
+ca_mfs_hold (ca_t *ctx, mfs_tx_t *mfs)
+{
+ dbg_assert (ctx);
+ dbg_assert_ptr (mfs);
+ CA_TRACE (MFS_HOLD, mfs);
+    /* Do nothing if already held. */
+ if (mfs->ca_state != CA_MFS_STATE_HELD)
+ {
+ /* Remove from priority queue. */
+ if (mfs->ca_state == CA_MFS_STATE_PRIO_QUEUED)
+ heap_remove (&ctx->mfs_heap, &mfs->ca_prio_link);
+ /* Add to hold list. */
+ mfs->ca_state = CA_MFS_STATE_HELD;
+ list_push (&ctx->held, &mfs->ca_held_link);
+ /* The current ACCESS may have changed. */
+ ca_access_update (ctx, phy_date (ctx->phy));
+ }
+}
+
+void
+ca_mfs_next_beacon_period (ca_t *ctx)
+{
+ dbg_assert (ctx);
+ /* Unhold MFS. */
+ while (!list_empty (&ctx->held))
+ {
+ mfs_tx_t *mfs = PARENT_OF (mfs_tx_t, ca_held_link,
+ list_pop (&ctx->held));
+ mfs->ca_state = CA_MFS_STATE_UNKNOWN;
+ ca_mfs_update (ctx, mfs);
+ }
+}
+
+static bool
+ca_mfs_less (heap_node_t *left, heap_node_t *right)
+{
+ mfs_tx_t *l = PARENT_OF (mfs_tx_t, ca_prio_link, left);
+ mfs_tx_t *r = PARENT_OF (mfs_tx_t, ca_prio_link, right);
+ /* Prefer MFS with something to send. */
+ if (l->seg_nb != 0 && r->seg_nb == 0)
+ return true;
+ if (l->seg_nb == 0 && r->seg_nb != 0)
+ return false;
+ /* Prefer better cap. */
+ if (l->cap != r->cap)
+ return l->cap > r->cap;
+ /* Prefer MME. */
+ return l->common.mme > r->common.mme;
+}
+
diff --git a/cesar/mac/ca/src/trace.c b/cesar/mac/ca/src/trace.c
new file mode 100644
index 0000000000..2dedffa298
--- /dev/null
+++ b/cesar/mac/ca/src/trace.c
@@ -0,0 +1,68 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/src/trace.c
+ * \brief Define Channel Access trace events.
+ * \ingroup mac_ca
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+
+void
+ca_trace_init (ca_t *ctx)
+{
+ static trace_namespace_t namespace;
+ static const trace_event_id_t event_ids[] =
+ {
+ TRACE_EVENT (CA_TRACE_INIT, "init"),
+ TRACE_EVENT (CA_TRACE_UNINIT, "uninit"),
+ TRACE_EVENT (CA_TRACE_MFS_ADD, "mfs add %x"),
+ TRACE_EVENT (CA_TRACE_MFS_REMOVE, "mfs remove %x"),
+ TRACE_EVENT (CA_TRACE_MFS_UPDATE, "mfs update %x"),
+ TRACE_EVENT (CA_TRACE_MFS_HOLD, "mfs hold %x"),
+
+ TRACE_EVENT (CA_TRACE_BACKOFF_INIT, "backoff init"),
+ TRACE_EVENT (CA_TRACE_BACKOFF_NEW,
+ "backoff new bpc=%u cw=%u bc=%u dc=%u"),
+ TRACE_EVENT (CA_TRACE_BACKOFF_DEFERRED, "backoff deferred count=%u"),
+ TRACE_EVENT (CA_TRACE_BACKOFF_ACCESS, "backoff access"),
+ TRACE_EVENT (CA_TRACE_BACKOFF_SUCCESS, "backoff success"),
+ TRACE_EVENT (CA_TRACE_BACKOFF_CANCEL, "backoff cancel"),
+
+ TRACE_EVENT (CA_TRACE_ALLOC_UPDATE_BEACON_PERIODS,
+ "alloc update beacon periods"),
+
+ TRACE_EVENT (CA_TRACE_ACCESS_ACTIVATE, "access activate date=%x"),
+ TRACE_EVENT (CA_TRACE_ACCESS_DEACTIVATE, "access deactivate"),
+ TRACE_EVENT (CA_TRACE_ACCESS_VCS_RESTART,
+ "access vcs restart start=%x len_tck=%d ant_tck=%d"
+ " eifs=%d"),
+ TRACE_EVENT (CA_TRACE_ACCESS_VCS_RESTART_MFS,
+ " mfs mfs=%x access_date=%x beacon_start=%x dur_tck=%d"
+ " cfp=%d hybrid=%d"),
+ TRACE_EVENT (CA_TRACE_ACCESS_VCS_RESTART_AIFS,
+ " aifs access_date=%x"),
+ TRACE_EVENT (CA_TRACE_ACCESS_GRANT, "access grant mfs=%x date=%x"
+ " dur_tck=%d"),
+ TRACE_EVENT (CA_TRACE_ACCESS_DEFER, "access defer date=%x"
+ " ant_tck=%d"),
+ TRACE_EVENT (CA_TRACE_ACCESS_AIFS, "access aifs"),
+ };
+ dbg_assert (ctx);
+ trace_namespace_init (&namespace, event_ids, COUNT (event_ids));
+ trace_buffer_add (&ctx->trace, "ca", 8, 4, true, &namespace);
+}
+
+void
+ca_trace_uninit (ca_t *ctx)
+{
+ dbg_assert (ctx);
+ trace_buffer_remove (&ctx->trace);
+}
+
diff --git a/cesar/mac/ca/test/ca/Config b/cesar/mac/ca/test/ca/Config
new file mode 100644
index 0000000000..2c7c85cbed
--- /dev/null
+++ b/cesar/mac/ca/test/ca/Config
@@ -0,0 +1,2 @@
+CONFIG_DEBUG_FATAL_CATCH = y
+CONFIG_TRACE = y
diff --git a/cesar/mac/ca/test/ca/Makefile b/cesar/mac/ca/test/ca/Makefile
new file mode 100644
index 0000000000..fc5bead643
--- /dev/null
+++ b/cesar/mac/ca/test/ca/Makefile
@@ -0,0 +1,8 @@
+BASE = ../../../..
+
+HOST_PROGRAMS = test_ca
+test_ca_SOURCES = test_ca.c test_backoff.c test_alloc.c test_access.c \
+ phy_stub.c
+test_ca_MODULES = lib mac/ca mac/common
+
+include $(BASE)/common/make/top.mk
diff --git a/cesar/mac/ca/test/ca/inc/phy_stub.h b/cesar/mac/ca/test/ca/inc/phy_stub.h
new file mode 100644
index 0000000000..b1a4746d35
--- /dev/null
+++ b/cesar/mac/ca/test/ca/inc/phy_stub.h
@@ -0,0 +1,23 @@
+#ifndef inc_phy_stub_h
+#define inc_phy_stub_h
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file inc/phy_stub.h
+ * \brief HAL Phy stub.
+ * \ingroup test
+ */
+
+/** Stub phy structure. */
+struct phy_t
+{
+ u32 date;
+};
+/* Forward declaration in hal/phy/forward.h. */
+
+#endif /* inc_phy_stub_h */
diff --git a/cesar/mac/ca/test/ca/src/phy_stub.c b/cesar/mac/ca/test/ca/src/phy_stub.c
new file mode 100644
index 0000000000..36344455f6
--- /dev/null
+++ b/cesar/mac/ca/test/ca/src/phy_stub.c
@@ -0,0 +1,67 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file src/phy_stub.c
+ * \brief HAL Phy stub.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "hal/phy/phy.h"
+
+#include "inc/phy_stub.h"
+
+phy_t global_phy;
+
+phy_t *
+phy_init (void *user_data, phy_rx_fc_cb_t rx_fc_cb, phy_access_cb_t access_cb,
+ phy_access_conf_cb_t access_conf_cb, phy_pbdma_cb_t pbdma_cb,
+ phy_tx_false_alarm_cb_t tx_false_alarm_cb, phy_deferred_cb_t deferred_cb)
+{
+ phy_t *ctx = &global_phy;
+ ctx->date = 0;
+ return ctx;
+}
+
+void
+phy_uninit (phy_t *ctx)
+{
+ dbg_assert (ctx);
+}
+
+void
+phy_access_backoff_start (phy_t *ctx, u32 date, uint cap)
+{
+ dbg_assert (ctx);
+}
+
+void
+phy_access_timer_program (phy_t *ctx, u32 date)
+{
+ dbg_assert (ctx);
+}
+
+void
+phy_access_timer_cancel (phy_t *ctx)
+{
+ dbg_assert (ctx);
+}
+
+u32
+phy_date (phy_t *ctx)
+{
+ dbg_assert (ctx);
+ return ctx->date;
+}
+
+void
+phy_rx_activate (phy_t *ctx, bool now, u32 date, bool flag)
+{
+ dbg_assert (ctx);
+}
+
diff --git a/cesar/mac/ca/test/ca/src/test_access.c b/cesar/mac/ca/test/ca/src/test_access.c
new file mode 100644
index 0000000000..32efca34a1
--- /dev/null
+++ b/cesar/mac/ca/test/ca/src/test_access.c
@@ -0,0 +1,375 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file src/test_access.c
+ * \brief Test access code.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/alloc.h"
+#include "mac/common/timings.h"
+
+#include "hal/phy/phy.h"
+#include "inc/phy_stub.h"
+
+#include "lib/test.h"
+
+#include <string.h>
+
+#define NB_ITER 100000
+#define NB_PEER 5
+#define NB_GLID 20
+#define RANDOM_START 0
+
+#define PEER_MIN 10
+
+void
+access_random_schedule (lib_rnd_t *rnd, ca_schedule_t *sched, uint length_tck)
+{
+ uint i;
+ const uint mini_tck = MAC_PREAMBLE_TCK + MAC_FC_AV_TCK + MAC_AIFS_TCK;
+ const uint nb = lib_rnd_uniform (rnd, CA_SCHEDULE_SIZE - 3) + 3;
+ /* Coexistence mode and encryption. */
+ sched->coexistence_mode = MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE;
+ sched->nek_switch = 0;
+ /* Random allocations. */
+ uint zoffset_tck = 0;
+ sched->allocations_nb = nb;
+ for (i = 0; i < nb - 1; i++)
+ {
+ sched->allocations[i].end_offset_tck =
+ lib_rnd_uniform (rnd, length_tck - zoffset_tck
+ - (nb - i) * mini_tck) + zoffset_tck + mini_tck;
+ zoffset_tck = sched->allocations[i].end_offset_tck;
+ }
+ sched->allocations[nb - 1].end_offset_tck = length_tck;
+    /* Random GLID. */
+ static const struct
+ {
+ u8 glid_min;
+ u8 glid_max;
+ uint prob;
+ } glid_prob[] = {
+ { MAC_LID_SPC_HOLE, MAC_LID_SPC_HOLE, 8 },
+ { MAC_GLID_MIN, MAC_GLID_MAX, 8 + 8 },
+ { MAC_LID_CFPI, MAC_LID_CFPI, 8 + 8 + 1 },
+ { MAC_LID_DISCOVER, MAC_LID_DISCOVER, 8 + 8 + 1 + 2 },
+ { MAC_LID_SHARED_CSMA, MAC_LID_SHARED_CSMA, 8 + 8 + 1 + 2 + 8 },
+ { MAC_LID_LOCAL_CSMA, MAC_LID_LOCAL_CSMA, 8 + 8 + 1 + 2 + 8 + 8 },
+ };
+ sched->allocations[0].glid = MAC_LID_SPC_CENTRAL;
+ for (i = 1; i < nb; i++)
+ {
+ uint r = lib_rnd_uniform (rnd, glid_prob[COUNT (glid_prob) - 1].prob);
+ uint j;
+ DICHOTOMY_SEARCH (0, COUNT (glid_prob), j, r < glid_prob[j].prob);
+ dbg_assert (j < COUNT (glid_prob));
+ sched->allocations[i].glid = glid_prob[j].glid_min
+ + lib_rnd_uniform (rnd, glid_prob[j].glid_max
+ - glid_prob[j].glid_min + 1);
+ }
+}
+
+void
+access_check_vcs_restart (test_t t, ca_t *ca, u32 date, uint duration_tck,
+ bool eifs, const ca_access_alloc_param_t *ap)
+{
+ test_within (t);
+ dbg_assert (ca);
+ uint bp_i = ca_alloc_find_beacon_period (ca, date);
+ dbg_assert (bp_i != ca->beacon_periods_tail);
+ ca_beacon_period_t *bp = &ca->beacon_periods[bp_i];
+ ca_schedule_t *sched = &ca->schedules[bp->schedule_index];
+ dbg_assert (sched->coexistence_mode < MAC_COEXISTENCE_NB);
+ dbg_assert (sched->nek_switch < 2);
+ dbg_assert (sched->allocations_nb);
+ uint alloc_i = ca_alloc_find (sched, date - bp->start_date);
+ dbg_assert (alloc_i < sched->allocations_nb);
+ ca_allocation_t *alloc = &sched->allocations[alloc_i];
+ ca_access_param_t *a = &ca->access_param;
+ test_fail_unless (lesseq_mod2p32 (a->access_date, bp->start_date +
+ alloc->end_offset_tck - MAC_AIFS_TCK));
+ test_fail_unless (lesseq_mod2p32 (date, a->access_date));
+ if (ap)
+ {
+ /* Check current allocation parameters. */
+ test_fail_unless (ap->coexistence_mode == sched->coexistence_mode);
+ test_fail_unless (ap->hybrid == CA_ALLOC_IS_HYBRID
+ (sched->coexistence_mode, alloc->glid));
+ test_fail_unless (ap->nek_switch == sched->nek_switch);
+ }
+ if (a->mfs)
+ {
+ /* MFS scheduled. */
+ uint slot = CA_ALLOC_IS_CSMA (alloc->glid)
+ ? (eifs ? 0 : 2) + ca->backoff.bc
+ : 0;
+ test_fail_unless (a->access_date == date + duration_tck
+ + slot * MAC_SLOT_TCK);
+ test_fail_unless (a->beacon_period_start_date == bp->start_date);
+ test_fail_unless (a->duration_tck == bp->start_date
+ + alloc->end_offset_tck - MAC_AIFS_TCK
+ - a->access_date);
+ test_fail_unless (a->cfp == !CA_ALLOC_IS_CSMA (alloc->glid));
+ test_fail_unless (a->hybrid == CA_ALLOC_IS_HYBRID
+ (sched->coexistence_mode, alloc->glid));
+ }
+ else
+ {
+ /* AIFS scheduled. */
+ test_fail_unless (a->access_date == bp->start_date
+ + alloc->end_offset_tck - MAC_AIFS_TCK);
+ }
+}
+
+void
+access_check_defer (test_t t, ca_t *ca, u32 date)
+{
+ access_check_vcs_restart (t, ca, date, 0, false, NULL);
+}
+
+void
+access_check_aifs (test_t t, ca_t *ca, u32 date,
+ const ca_access_alloc_param_t *ap)
+{
+ dbg_assert_ptr (ap);
+ access_check_vcs_restart (t, ca, date + MAC_AIFS_TCK, 0, false, ap);
+}
+
+void
+access_basic_test_case (test_t t)
+{
+ uint i, j;
+ lib_rnd_t rnd[1];
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ test_case_begin (t, "basic");
+ /* Initialise. */
+ lib_rnd_init (rnd, 1234);
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+#if RANDOM_START
+ phy->date = lib_rnd32 (rnd);
+#else
+ phy->date = MAC_MS_TO_TCK (500 / 50);
+#endif
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ ca = ca_init (phy, &config, store);
+    /* Characteristics for null slots are determined as follows:
+ * - for index < NB_GLID: i = index
+ * - lid = i != 0 ? i + MAC_GLID_MIN : MAC_LID_SPC_CENTRAL
+ * - cap = i % 4
+ * - tei = i % NB_PEER + PEER_MIN
+ * - bcast = false
+ * - cfp = true
+ * - for index >= NB_GLID: i = index - NB_GLID
+ * - lid = i % MAC_PLID_NB + MAC_PLID_MIN
+ * - cap = lid - MAC_PLID_MIN
+ * - tei = i >= MAC_PLID_NB * NB_PEER
+ * ? MAC_TEI_BCAST
+ * : i / MAC_PLID_NB + PEER_MIN
+ * - bcast = i >= MAC_PLID_NB * NB_PEER
+ * - cfp = false
+ */
+ mfs_tx_t *mfses[NB_GLID + MAC_PLID_NB * (1 + NB_PEER)];
+ uint mfses_used = 0;
+ memset (mfses, 0, sizeof (mfses));
+ /* Now the big test.
+ * Create random schedules and random MFS for the STA with TEI = [2,3]. */
+ test_begin (t, "random test")
+ {
+ const int beacon_period_length_tck = MAC_MS_TO_TCK (1000 / 50);
+ ca_beacon_period_t bps[2];
+ /* Initialise first beacon period. */
+ access_random_schedule (rnd, ca_alloc_get_schedule (ca, 3),
+ beacon_period_length_tck);
+ bps[1].start_date = phy->date - beacon_period_length_tck / 2;
+ bps[1].schedule_index = 3;
+ for (i = 0; i < NB_ITER; i++)
+ {
+ /* Make new schedule. */
+ access_random_schedule (rnd, ca_alloc_get_schedule (ca, i % 4),
+ beacon_period_length_tck);
+ /* Make new beacon period. */
+ bps[0] = bps[1];
+ bps[1].start_date = bps[0].start_date + beacon_period_length_tck;
+ bps[1].schedule_index = i % 4;
+ /* Update beacon period. */
+ ca_alloc_update_beacon_periods (ca, bps, COUNT (bps));
+ /* First schedule. */
+ if (i == 0)
+ {
+ const ca_access_alloc_param_t *ap =
+ ca_access_activate (ca, phy->date, 0);
+ access_check_vcs_restart (t, ca, phy->date, MAC_EIFS_10_TCK,
+ false, ap);
+ }
+ while (less_mod2p32 (phy->date, bps[1].start_date))
+ {
+ if (phy->date == ca->access_param.access_date
+ || lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.5)))
+ {
+ /* Next ACCESS. */
+ phy->date = ca->access_param.access_date;
+ if (ca->access_param.mfs)
+ {
+ mfs_tx_t *mfs = ca->access_param.mfs;
+ dbg_assert (
+ mfs->ca_state == CA_MFS_STATE_PRIO_QUEUED
+ || mfs->ca_state == CA_MFS_STATE_CFP_QUEUED);
+ /* Timings are completely approximated. */
+ int seg_sent = lib_rnd_uniform (rnd, mfs->seg_nb);
+ uint fl_tck = MAC_PREAMBLE_TCK + MAC_FC_AV_TCK
+ + MAC_DX567_TCK * seg_sent;
+ if (fl_tck > ca->access_param.duration_tck)
+ {
+ ca_access_defer (ca, phy->date, 0);
+ access_check_defer (t, ca, phy->date);
+ }
+ else
+ {
+ ca_access_vcs_restart (ca, phy->date,
+ MAC_EIFS_10_TCK, 0, true);
+ access_check_vcs_restart (t, ca, phy->date,
+ MAC_EIFS_10_TCK, true,
+ NULL);
+ if (seg_sent)
+ {
+ mfs->seg_nb -= seg_sent;
+ ca_mfs_update (ca, mfs);
+ ca_access_vcs_restart (ca, phy->date, fl_tck,
+ 0, false);
+ access_check_vcs_restart (t, ca, phy->date,
+ fl_tck, false,
+ NULL);
+ }
+ }
+ }
+ else
+ {
+ const ca_access_alloc_param_t *ap =
+ ca_access_aifs (ca);
+ access_check_aifs (t, ca, phy->date, ap);
+ }
+ }
+ else
+ {
+ /* Random event. */
+ dbg_assert (less_mod2p32 (phy->date,
+ ca->access_param.access_date));
+ u32 next_date = phy->date + lib_rnd_uniform (
+ rnd, ca->access_param.access_date - phy->date);
+ phy->date = next_date;
+ if (mfses_used == 0
+ || lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.2)))
+ {
+ j = lib_rnd_uniform (rnd, COUNT (mfses));
+ if (!mfses[j])
+ {
+ /* Create a new MFS. */
+ uint lid, cap, tei;
+ bool bcast, cfp;
+ if (j < NB_GLID)
+ {
+ lid = j != 0 ? j + MAC_GLID_MIN
+ : MAC_LID_SPC_CENTRAL;
+ cap = j % 4;
+ tei = j % NB_PEER + PEER_MIN;
+ bcast = false;
+ cfp = true;
+ }
+ else
+ {
+ uint sj = j - NB_GLID;
+ cap = sj % MAC_PLID_NB;
+ lid = cap + MAC_PLID_MIN;
+ bcast = sj >= MAC_PLID_NB * NB_PEER;
+ tei = bcast ? MAC_TEI_BCAST
+ : sj / MAC_PLID_NB + PEER_MIN;
+ cfp = false;
+ }
+ bool added;
+ mfs_tx_t *mfs = mac_store_mfs_add_tx
+ (store, bcast, false, lid, tei, &added);
+ mfs->cfp = cfp;
+ dbg_assert (added);
+ mfs->seg_nb = lib_rnd_uniform (rnd, 100);
+ mfses[j] = mfs;
+ mfses_used++;
+ ca_mfs_add (ca, mfs);
+ }
+ else
+ {
+ /* Remove an MFS. */
+ mfs_tx_t *mfs = mfses[j];
+ ca_mfs_remove (ca, mfs);
+ mac_store_mfs_remove (store,
+ PARENT_OF (mfs_t, tx, mfs));
+ blk_release (mfs);
+ mfses[j] = NULL;
+ mfses_used--;
+ }
+ }
+ else
+ {
+ /* Modify an MFS. */
+ do {
+ j = lib_rnd_uniform (rnd, COUNT (mfses));
+ } while (!mfses[j]);
+ mfs_tx_t *mfs = mfses[j];
+ if (mfs->ca_state != CA_MFS_STATE_HELD)
+ {
+ if (lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.05)))
+ {
+ ca_mfs_hold (ca, mfs);
+ }
+ else
+ {
+ mfs->seg_nb = lib_rnd_uniform (rnd, 100);
+ ca_mfs_update (ca, mfs);
+ }
+ }
+ }
+ }
+ }
+ }
+ } test_end;
+ /* Uninitialise. */
+ ca_access_deactivate (ca);
+ for (i = 0; i < COUNT (mfses); i++)
+ {
+ mfs_tx_t *mfs = mfses[i];
+ if (mfs)
+ {
+ ca_mfs_remove (ca, mfs);
+ mac_store_mfs_remove (store, PARENT_OF (mfs_t, tx, mfs));
+ blk_release (mfs);
+ }
+ }
+ for (i = 0; i < NB_PEER; i++)
+ {
+ bool ok = mac_store_sta_remove (store, PEER_MIN + i);
+ dbg_assert (ok);
+ }
+ ca_uninit (ca);
+ phy_uninit (phy);
+ mac_store_uninit (store);
+}
+
+void
+access_test_suite (test_t t)
+{
+ test_suite_begin (t, "access");
+ access_basic_test_case (t);
+}
+
diff --git a/cesar/mac/ca/test/ca/src/test_alloc.c b/cesar/mac/ca/test/ca/src/test_alloc.c
new file mode 100644
index 0000000000..ff563015cd
--- /dev/null
+++ b/cesar/mac/ca/test/ca/src/test_alloc.c
@@ -0,0 +1,218 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/test/ca/src/test_alloc.c
+ * \brief Test allocations code.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+#include "mac/ca/inc/alloc.h"
+#include "mac/common/timings.h"
+
+#include "hal/phy/phy.h"
+#include "inc/phy_stub.h"
+
+#include "lib/test.h"
+
+#define NB_ITER 100000
+
+void
+alloc_basic_test_case (test_t t)
+{
+ uint i, j, k;
+ lib_rnd_t rnd[1];
+ phy_t *phy;
+ mac_config_t config;
+ mac_store_t *store;
+ ca_t *ca;
+ test_case_begin (t, "basic");
+ /* Initialise. */
+ phy = phy_init (NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ mac_config_init (&config);
+ config.tei = 1;
+ store = mac_store_init ();
+ ca = ca_init (phy, &config, store);
+ lib_rnd_init (rnd, 1234);
+ test_begin (t, "is hybrid")
+ {
+ struct
+ {
+ u8 lid;
+ mac_coexistence_mode_t coex;
+ bool expect;
+ } is_hybrid_tab[] = {
+ { 0x80, MAC_COEXISTENCE_AV_ONLY_MODE, false },
+ { 0x8a, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, false },
+ { 0xa2, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { 0xcd, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_AV_ONLY_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_AV_ONLY_MODE, false },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_SHARED_CSMA, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_AV_ONLY_MODE, false },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, false },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_LOCAL_CSMA, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_AV_ONLY_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_DISCOVER, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_AV_ONLY_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_SHARED_CSMA_HYBRID_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_FULL_HYBRID_MODE, true },
+ { MAC_LID_SPC_CENTRAL, MAC_COEXISTENCE_HYBRID_DELIMITERS_MODE, true },
+ // { MAC_LID_CFPI, ? },
+ };
+ for (i = 0; i < COUNT (is_hybrid_tab); i++)
+ {
+ test_fail_unless (
+ CA_ALLOC_IS_HYBRID (is_hybrid_tab[i].coex,
+ is_hybrid_tab[i].lid) ==
+ is_hybrid_tab[i].expect, "is hybrid mismatch i=%d", i);
+ }
+ } test_end;
+ test_begin (t, "beacon periods")
+ {
+ const int beacon_period_length = MAC_MS_TO_TCK (1000 / 50);
+ ca_beacon_period_t periods[CA_BEACON_PERIOD_NB];
+ uint periods_nb = 0;
+ uint periods_nb_new;
+ uint used;
+ /* Create schedule to satisfy asserts. */
+ for (i = 0; i < CA_SCHEDULE_NB; i++)
+ {
+ ca_schedule_t *sched = ca_alloc_get_schedule (ca, i);
+ sched->coexistence_mode = MAC_COEXISTENCE_AV_ONLY_MODE;
+ sched->nek_switch = 0;
+ sched->allocations_nb = 1;
+ sched->allocations[0].end_offset_tck = beacon_period_length;
+ sched->allocations[0].glid = MAC_LID_SPC_HOLE;
+ }
+ /* Create beacon periods. */
+ for (i = 0; i < NB_ITER; i++)
+ {
+ phy->date = i * beacon_period_length + beacon_period_length / 3;
+ periods_nb_new =
+ lib_rnd_uniform (rnd, CA_BEACON_PERIOD_NB - 2) + 1;
+ for (j = 0; j < periods_nb_new; j++)
+ {
+ /* Change older periods, set new periods. */
+ if (j + 1 >= periods_nb
+ || lib_rnd_flip_coin (rnd, LIB_RND_RATIO (0.3)))
+ {
+ periods[j].start_date = (i + j) * beacon_period_length
+ + lib_rnd_uniform (rnd, beacon_period_length / 100)
+ - beacon_period_length / 200;
+ periods[j].schedule_index =
+ lib_rnd_uniform (rnd, CA_SCHEDULE_NB);
+ }
+ else
+ {
+ periods[j] = periods[j + 1];
+ }
+ }
+ periods_nb = periods_nb_new;
+ /* Update CA periods. */
+ ca_alloc_update_beacon_periods (ca, periods, periods_nb);
+ test_fail_unless (ca->current_beacon_period
+ == ca->beacon_periods_tail);
+ /* Check the update. */
+ for (j = 0, k = ca->beacon_periods_head; j < periods_nb;
+ j++, k = CA_ALLOC_NEXT_BEACON_PERIOD (k))
+ {
+ test_fail_unless (k != ca->beacon_periods_tail);
+ test_fail_unless ((ca->beacon_periods[k].start_date
+ == periods[j].start_date)
+ && (ca->beacon_periods[k].schedule_index
+ == periods[j].schedule_index));
+ }
+ test_fail_unless (k == ca->beacon_periods_tail);
+ /* Test get_schedule. */
+ used = 0;
+ for (j = 0; j < periods_nb; j++)
+ {
+ dbg_assert (periods[j].schedule_index < CA_SCHEDULE_NB);
+ used |= 1 << periods[j].schedule_index;
+ }
+ for (j = 0; used; j++, used >>= 1)
+ {
+ bool caught = false;
+ dbg_fatal_try_begin
+ {
+ ca_alloc_get_schedule (ca, j);
+ }
+ dbg_fatal_try_catch_void ()
+ {
+ caught = true;
+ }
+ dbg_fatal_try_end;
+ test_fail_unless (((used & 1) && caught)
+ || (!(used & 1) && !caught));
+ }
+ /* Test find_beacon_period. */
+ for (j = 0, k = ca->beacon_periods_head; j < periods_nb;
+ j++, k = CA_ALLOC_NEXT_BEACON_PERIOD (k))
+ {
+ test_fail_unless (ca_alloc_find_beacon_period (
+ ca, periods[j].start_date) == k);
+ test_fail_unless (ca_alloc_find_beacon_period (
+ ca, periods[j].start_date
+ + beacon_period_length / 3) == k);
+ }
+ }
+ } test_end;
+ test_begin (t, "alloc find")
+ {
+ ca_schedule_t schedule;
+ uint end;
+ for (i = 0; i < NB_ITER / CA_SCHEDULE_SIZE; i++)
+ {
+ schedule.coexistence_mode = MAC_COEXISTENCE_AV_ONLY_MODE;
+ schedule.nek_switch = 0;
+ schedule.allocations_nb = lib_rnd_uniform (rnd, CA_SCHEDULE_SIZE);
+ end = 0;
+ for (j = 0; j < schedule.allocations_nb; j++)
+ {
+ end = end + 2 + lib_rnd_uniform (
+ rnd, (1 << 24)
+ - (schedule.allocations_nb - j) * 2
+ - end - 2);
+ schedule.allocations[j].end_offset_tck = end;
+ schedule.allocations[j].glid = 0;
+ }
+ end = 0;
+ for (j = 0; j < schedule.allocations_nb; j++)
+ {
+ test_fail_unless (ca_alloc_find (&schedule, end) == j);
+ test_fail_unless (ca_alloc_find (&schedule, end + 1) == j);
+ end = schedule.allocations[j].end_offset_tck;
+ test_fail_unless (ca_alloc_find (&schedule, end) == j + 1);
+ test_fail_unless (ca_alloc_find (&schedule, end + 1)
+ == j + 1);
+ }
+ }
+ } test_end;
+ /* Uninitialise. */
+ ca_uninit (ca);
+ phy_uninit (phy);
+ mac_store_uninit (store);
+}
+
+void
+alloc_test_suite (test_t t)
+{
+ test_suite_begin (t, "alloc");
+ alloc_basic_test_case (t);
+}
+
diff --git a/cesar/mac/ca/test/ca/src/test_backoff.c b/cesar/mac/ca/test/ca/src/test_backoff.c
new file mode 100644
index 0000000000..ed56d12e4a
--- /dev/null
+++ b/cesar/mac/ca/test/ca/src/test_backoff.c
@@ -0,0 +1,129 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/test/ca/src/test_backoff.c
+ * \brief Test backoff code.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "mac/ca/inc/context.h"
+
+#include "lib/test.h"
+
+#define NB_ITER 1000000
+
+static void
+backoff_basic_test_case (test_t t)
+{
+ uint i, j;
+ uint cap = 0, slot_count = 0, bpcm;
+ lib_rnd_t rnd;
+ ca_t ca;
+ u32 success_ratio = LIB_RND_RATIO (0.5),
+ same_cap_ratio = LIB_RND_RATIO (0.6);
+ bool last_success = true;
+ uint last_cap = 0,
+ last_bpc = 0,
+ last_cw = 0,
+ last_bc = 0,
+ last_dc = 0;
+ const uint dc_table[4] = { 0, 1, 3, 15 };
+ const uint cw_table[4][4] = {
+ { 7, 15, 31, 63 },
+ { 7, 15, 31, 63 },
+ { 7, 15, 15, 31 },
+ { 7, 15, 15, 31 },
+ };
+ test_case_begin (t, "basic");
+ /* Initialise. */
+ ca_trace_init (&ca);
+ ca_backoff_init (&ca, 0);
+ lib_rnd_init (&rnd, 1234);
+ test_begin (t, "use backoff")
+ {
+ /* Use backoff. */
+ for (i = 0; i < NB_ITER; i++)
+ {
+ /* New frame. */
+ if (!lib_rnd_flip_coin (&rnd, same_cap_ratio))
+ cap = lib_rnd32 (&rnd) % 4;
+ ca_backoff_new (&ca, cap);
+ /* Test unused backoff. */
+ for (j = lib_rnd_uniform (&rnd, 3); j; j--)
+ {
+ if (!lib_rnd_flip_coin (&rnd, same_cap_ratio))
+ cap = lib_rnd32 (&rnd) % 4;
+ ca_backoff_new (&ca, cap);
+ }
+ /* Check backoff against some general rules... */
+ test_verbose_print ("cap = %d, bpc = %d, cw = %2d, bc = %2d, "
+ "dc = %2d ", cap, ca.backoff.bpc,
+ ca.backoff.cw, ca.backoff.bc, ca.backoff.dc);
+ test_verbose_print ("last_success = %d, last_bpc = %d, "
+ "last_bc = %2d, slot_count = %2d",
+ last_success, last_bpc, last_bc, slot_count);
+ bpcm = MIN (3u, ca.backoff.bpc - 1);
+ test_fail_unless (ca.backoff.cw == cw_table[cap][bpcm]);
+ test_fail_unless (ca.backoff.bc <= ca.backoff.cw);
+ test_fail_unless ((last_success && ca.backoff.bpc == 1)
+ || !last_success);
+ test_fail_unless
+ (last_success
+ || (ca.backoff.bpc > last_bpc
+ && (last_bpc == 0 || last_bc - slot_count == 0
+ || last_dc == 0))
+ || (ca.backoff.bpc == last_bpc
+ && (last_bpc != 0 && last_bc - slot_count != 0
+ && last_dc != 0)));
+ test_fail_unless (((last_success || ca.backoff.bpc > last_bpc)
+ && ca.backoff.dc == dc_table[bpcm])
+ || ca.backoff.dc < last_dc);
+ /* Save last state. */
+ last_cap = cap;
+ last_bpc = ca.backoff.bpc;
+ last_cw = ca.backoff.cw;
+ last_bc = ca.backoff.bc;
+ last_dc = ca.backoff.dc;
+ /* Defer or success? */
+ last_success = lib_rnd_flip_coin (&rnd, success_ratio);
+ if (last_success)
+ {
+ for (j = lib_rnd_uniform (&rnd, 2); j; j--)
+ ca_backoff_deferred (&ca, lib_rnd_uniform (&rnd, 64));
+ ca_backoff_success (&ca);
+ }
+ else
+ {
+ uint slot_count_taken;
+ slot_count = lib_rnd_uniform (&rnd, ca.backoff.bc * 2 + 1);
+ slot_count_taken = slot_count >= ca.backoff.bc ? ca.backoff.bc
+ : slot_count;
+ ca_backoff_deferred (&ca, slot_count);
+ slot_count = slot_count_taken;
+ }
+ /* Random defer/cancel. */
+ for (j = lib_rnd_uniform (&rnd, 3); j; j--)
+ {
+ if (lib_rnd_flip_coin (&rnd, LIB_RND_RATIO (0.5)))
+ ca_backoff_deferred (&ca, lib_rnd_uniform (&rnd, 64));
+ else
+ ca_backoff_cancel (&ca);
+ }
+ }
+ } test_end;
+ ca_trace_uninit (&ca);
+}
+
+void
+backoff_test_suite (test_t t)
+{
+ test_suite_begin (t, "backoff");
+ backoff_basic_test_case (t);
+}
+
diff --git a/cesar/mac/ca/test/ca/src/test_ca.c b/cesar/mac/ca/test/ca/src/test_ca.c
new file mode 100644
index 0000000000..ec9b6724ba
--- /dev/null
+++ b/cesar/mac/ca/test/ca/src/test_ca.c
@@ -0,0 +1,39 @@
+/* Cesar project {{{
+ *
+ * Copyright (C) 2007 Spidcom
+ *
+ * <<<Licence>>>
+ *
+ * }}} */
+/**
+ * \file mac/ca/test/ca/src/test_ca.c
+ * \brief Test Channel Access.
+ * \ingroup test
+ */
+#include "common/std.h"
+
+#include "lib/test.h"
+#include "lib/trace.h"
+
+void
+backoff_test_suite (test_t t);
+
+void
+alloc_test_suite (test_t t);
+
+void
+access_test_suite (test_t t);
+
+int
+main (int argc, char **argv)
+{
+ test_t t;
+ trace_init ();
+ test_init (t, argc, argv);
+ backoff_test_suite (t);
+ alloc_test_suite (t);
+ access_test_suite (t);
+ trace_uninit ();
+ test_result (t);
+ return test_nb_failed (t) == 0 ? 0 : 1;
+}