/* Cesar project {{{
 *
 * Copyright (C) 2007 Spidcom
 *
 * <<<Licence>>>
 *
 * }}} */
/**
 * \file    mac/pbproc/src/mfs.c
 * \brief   MFS related PB Processing functions.
 * \ingroup pbproc
 */
#include "common/std.h"
#include "mac/pbproc/pbproc.h"

#include "hal/arch/arch.h"
#include "lib/slist.h"

#include "mac/pbproc/inc/context.h"

#include <string.h>

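/**
 * Extract the tail segment of a TX MFS back from the PB Processing.
 * \param  mfs  MFS to extract from
 * \return  the extracted tail PB, or NULL if no segment is available
 *
 * When the tail is still pending, it is returned directly.  Otherwise,
 * seg_nb is atomically decremented to prevent the PB Processing from taking
 * the last segment; if seg_nb was already zero, the extraction is cancelled.
 */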
pb_t * ARCH_ILRAM
pbproc_mfs_extract_tail (mfs_tx_t *mfs)
{
    dbg_claim (mfs);
    /* Tail is not owned by the PB Processing; return it immediately. */
    if (mfs->pending_seg_nb > 0)
    {
        return mfs->tail;
    }
    else
    {
        volatile mfs_tx_t *vmfs = mfs;
        /* Decrement seg_nb to forbid the PB Processing from taking the last
         * segment. */
        arch_atomic_add (&vmfs->seg_nb, -1);
        if (vmfs->seg_nb == -1)
        {
            /* Seg_nb was zero, cancel extraction. */
            arch_atomic_add (&vmfs->seg_nb, 1);
            return NULL;
        }
        else
        {
            /* Extraction successful. */
            vmfs->pending_seg_nb++;
            return vmfs->tail;
        }
    }
}

/**
 * Internal version of pbproc_mfs_insert, without PB verifications.
 * \param  mfs  MFS to insert to
 * \param  first  first segment to insert
 * \param  last  last segment to insert
 * \param  nb  number of inserted segments
 * \param  cap  CAP of the added segments, only used for dynamic CAP
 */
void ARCH_ILRAM
pbproc_mfs_insert_ (mfs_tx_t *mfs, pb_t *first, pb_t *last, uint nb,
                    uint cap)
{
    dbg_claim (mfs);
    dbg_claim (first);
    dbg_claim (last);
    dbg_claim (nb > 0);
    dbg_claim (cap < 4);
    volatile mfs_tx_t *vmfs = mfs;
    /* Chain. */
    uint flags = arch_isr_lock ();
    slist_push_back_range (mfs->, first, last, bare);
    arch_isr_unlock (flags);
    /* If dynamic CAP, recompute it. */
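    /* Segments previously counted with a lower CAP are merged into the
     * counter of the new CAP. */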
    if (vmfs->dynamic_cap)
    {
        uint i;
        int nb_cap;
        nb_cap = nb;
        flags = arch_isr_lock ();
        for (i = 0; i < cap; i++)
        {
            nb_cap += vmfs->cap_seg_nb[i];
            vmfs->cap_seg_nb[i] = 0;
        }
        vmfs->cap_seg_nb[cap] += nb_cap;
        if (cap > vmfs->cap)
        {
            vmfs->cap = cap;
        }
        arch_isr_unlock (flags);
    }
    /* Count pending segments. */
    vmfs->pending_seg_nb += nb;
}

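/**
 * Insert a chain of segments at the tail of a TX MFS.
 * \param  mfs  MFS to insert to
 * \param  first  first segment to insert
 * \param  last  last segment to insert
 * \param  nb  number of inserted segments
 * \param  cap  CAP of the added segments, only used for dynamic CAP
 *
 * When CONFIG_DEBUG_CLAIM is set, the chain is first checked for consistency
 * (SSN sequence and PB header fields).
 */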
void ARCH_ILRAM
pbproc_mfs_insert (mfs_tx_t *mfs, pb_t *first, pb_t *last, uint nb, uint cap)
{
    dbg_claim (mfs);
    dbg_claim (first);
    dbg_claim (last);
    dbg_claim (nb > 0);
    dbg_claim (cap < 4);
    if (CONFIG_DEBUG_CLAIM)
    {
        uint i;
        pb_t *p;
        u16 ssn;
        ssn = mfs->next_ssn - nb;
        for (i = 0, p = first; i < nb; i++, p = p->next)
        {
            dbg_assert (p);
            if (i != nb - 1)
            {
                dbg_assert (p != last);
                dbg_assert_ptr (p->next);
            }
            else
                dbg_assert (p == last);
            dbg_assert (p->data);
            dbg_assert (p->header.ssn == ssn);
            ssn = p->header.ssn + 1;
            dbg_assert (p->header.vpbf);
            dbg_assert (p->header.mmqf == mfs->common.mme);
            dbg_assert (!p->header.opsf);
            dbg_assert (p->header.rsvd == 0);
        }
    }
    pbproc_mfs_insert_ (mfs, first, last, nb, cap);
}

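/**
 * Make pending segments available to the PB Processing.
 * \param  mfs  MFS owning the segments
 * \param  nb  number of pending segments to provide
 */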
void ARCH_ILRAM
pbproc_mfs_provide (mfs_tx_t *mfs, uint nb)
{
    dbg_claim (mfs);
    dbg_assert ((int) nb <= mfs->pending_seg_nb);
    volatile mfs_tx_t *vmfs = mfs;
    /* PB Processing can now use the newly inserted segments. */
    vmfs->pending_seg_nb -= nb;
    arch_atomic_add (&vmfs->seg_nb, nb);
}

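/**
 * Prepare a beacon PB for transmission on a beacon MFS.
 * \param  ctx  PB Processing context
 * \param  mfs  beacon MFS
 * \param  pb  beacon PB to send
 * \param  params  beacon transmission parameters, copied after the payload
 *
 * Any previously queued beacon which was not sent yet is cancelled and
 * released before the new one is inserted.
 */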
void
pbproc_mfs_beacon_prepare (pbproc_t *ctx, mfs_tx_t *mfs, pb_beacon_t *pb,
                           const pbproc_tx_beacon_params_t *params)
{
    dbg_assert (ctx);
    dbg_assert_ptr (mfs);
    dbg_assert_ptr (pb);
    dbg_assert (params);
    dbg_assert (mfs->beacon);
    dbg_assert (mfs->seg_nb <= 1 && mfs->pending_seg_nb == 0
                && !mfs->dynamic_cap);
    dbg_assert (!params->bpsto
                || (params->bpsto > pb->data
                    && params->bpsto <= pb->data + MAC_PB136_BYTES - 3));
    /* Copy extra parameters after payload. */
    pbproc_tx_beacon_params_t *after_payload = (void *) (pb->data
                                                         + MAC_PB136_BYTES);
    *after_payload = *params;
    /* Cancel the previous beacon if possible. */
    pb_t *tail = pbproc_mfs_extract_tail (mfs);
    if (tail)
    {
        /* Unchain; the MFS cannot be active because there is no segment to
         * send. */
        dbg_assert (mfs->seg_nb == 0);
        dbg_assert (mfs->head == tail && mfs->tail == tail);
        mfs->head = NULL;
        dbg_invalid_ptr (mfs->tail);
        mfs->pending_seg_nb--;
        /* Release previous beacon. */
        blk_release_desc (&tail->blk);
    }
    /* Add the segment. */
    pbproc_mfs_insert_ (mfs, (pb_t *) pb, (pb_t *) pb, 1, 0);
    pbproc_mfs_provide (mfs, 1);
}

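/**
 * Release the expired segments of a TX MFS.
 * \param  ctx  PB Processing context
 * \param  mfs  MFS to expire
 * \param  expiration_ntb  segments expiring at or before this date are
 * released
 * \param  first_pb_expiration_ntb  filled with the expiration date of the
 * first remaining PB
 * \return  true if the MFS is left empty
 *
 * Segments reserved for the MPDU currently being prepared are never released.
 */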
bool
pbproc_mfs_expire (pbproc_t *ctx, mfs_tx_t *mfs, u32 expiration_ntb,
                   u32 *first_pb_expiration_ntb)
{
    dbg_assert (ctx);
    dbg_assert (mfs);
    dbg_assert (first_pb_expiration_ntb);
    bool empty;
    uint skip = 0;
    pb_t *first = NULL, *p, *last_skip = NULL;
    /* Release list, used to release PBs with the ISR unlocked. */
    pb_t *release_head = NULL, *release_tail;
    dbg_invalid_ptr (release_tail);
    int release_nb = 0;
    /* Start of critical section. */
    uint flags = arch_isr_lock ();
    /* Is this MFS being transmitted? */
    if (ctx->prep_mpdu.main_mfs == mfs && ctx->prep_mpdu.main_seg_nb)
    {
        skip = ctx->prep_mpdu.main_seg_nb_reserved;
        first = ctx->prep_mpdu.main_head; /* May be NULL. */
    }
    /* Walk through the segments. */
    p = mfs->head;
    if (p)
    {
        /* Skip reserved PBs. */
        for (p = mfs->head; skip && p != mfs->tail; skip--)
        {
            last_skip = p;
            p = p->next;
        }
        if (skip == 0)
        {
            /* Nothing left to skip, segments may now expire. */
            if (lesseq_mod2p32 (p->expiration_ntb, expiration_ntb))
            {
                release_head = p;
                release_nb++;
                /* Find last segment to expire. */
                while (p != mfs->tail
                       && lesseq_mod2p32 (p->next->expiration_ntb,
                                          expiration_ntb))
                {
                    p = p->next;
                    release_nb++;
                }
                release_tail = p;
                /* Slice MFS list at last expired segment. */
                slist_sever (mfs->, last_skip, p, bare);
                /* Update segment count. */
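                /* Expired segments are removed from the segments owned by
                 * the PB Processing first, then from the pending ones. */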
                if (mfs->seg_nb >= release_nb)
                    mfs->seg_nb -= release_nb;
                else
                {
                    dbg_assert (mfs->pending_seg_nb
                                >= release_nb - mfs->seg_nb);
                    mfs->pending_seg_nb -= release_nb - mfs->seg_nb;
                    mfs->seg_nb = 0;
                }
            }
        }
    }
    /* Report the first PB expiration date. */
    if (first)
        *first_pb_expiration_ntb = first->expiration_ntb;
    else if (mfs->head)
        *first_pb_expiration_ntb = mfs->head->expiration_ntb;
    else if (DEBUG)
        *first_pb_expiration_ntb = 0xDEADEEEE;
    /* Is empty? */
    empty = !first && !mfs->head;
    /* End of critical section. */
    arch_isr_unlock (flags);
    /* Release expired segments. */
    if (release_head)
        blk_release_desc_range (&release_head->blk, &release_tail->blk);
    /* Done. */
    return empty;
}

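/**
 * Remove and release all the segments of a TX MFS being removed.
 * \param  mfs  MFS to empty, its CA state must be CA_MFS_STATE_REMOVED
 */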
void
pbproc_mfs_remove_all (mfs_tx_t *mfs)
{
    pb_t *head, *tail;
    dbg_assert (mfs);
    /* Check that the MFS can no longer be selected for TX. */
    dbg_assert (mfs->ca_state == CA_MFS_STATE_REMOVED);
    /* Take segments from PBProc. */
    uint flags = arch_isr_lock ();
    mfs->seg_nb = 0;
    head = mfs->head;
    tail = mfs->tail;
    slist_init (mfs->, bare);
    arch_isr_unlock (flags);
    mfs->pending_seg_nb = 0;
    /* Free extracted segments. */
    if (head)
        blk_release_desc_range (&head->blk, &tail->blk);
}

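/**
 * Decrement the per CAP segment counters.
 * \param  mfs  MFS to update
 * \param  nb  number of segments to account for
 *
 * Counters are decremented starting from the current CAP and going down; the
 * MFS CAP is updated to the last decremented one.
 */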
void
pbproc_mfs_cap_update (mfs_tx_t *mfs, uint nb)
{
    int i, left;
    dbg_assert (mfs);
    dbg_assert (nb > 0);
    i = mfs->cap;
    left = nb;
    /* Decrement counters. */
    while (mfs->cap_seg_nb[i] < left)
    {
        dbg_assert (i != 0);
        left -= mfs->cap_seg_nb[i];
        mfs->cap_seg_nb[i] = 0;
        i--;
    }
    mfs->cap_seg_nb[i] -= left;
    mfs->cap = i;
}