/* Cesar project {{{
 *
 * Copyright (C) 2008 Spidcom
 *
 * <<>>
 *
 * }}} */
/**
 * \file    lib/src/slab.c
 * \brief   Slab allocator.
 * \ingroup lib
 *
 * Implementation
 * --------------
 *
 * Slabs are allocated when there is no free object in existing slabs.
 * Existing slabs with free objects are listed in the "partial" list.
 *
 * When a new slab is allocated, it is sliced in aligned objects which are
 * chained in the "free" list.
 */
#include "common/std.h"

#include "lib/slab.h"

#include "config/slab/alloc/scramble.h"

#include "lib/blk.h"
#include "lib/restrack.h"

#include "hal/arch/arch.h"

#if CONFIG_SLAB_ALLOC_SCRAMBLE
# include <string.h>
#endif

/** Slab data header size. */
#define SLAB_DATA_HEADER_SIZE (sizeof (slab_data_t))

/** Reference counter size. */
#define SLAB_REFCNT_SIZE 4

/** Find reference counter from object. */
#define REFCNT(obj) ((int *) (obj) - 1)

/** Alignment of returned objects. */
#define SLAB_ALIGN_BYTES 8

/** Align a value on SLAB_ALIGN_BYTES. */
#define SLAB_ALIGN(v) (((v) + SLAB_ALIGN_BYTES - 1) & ~(SLAB_ALIGN_BYTES - 1))

/* Forward declaration. */
typedef struct slab_t slab_t;

/** Object. */
struct slab_object_t
{
    /** Pointer to next object. */
    struct slab_object_t *next;
};
typedef struct slab_object_t slab_object_t;

/** Slab data header. */
struct slab_data_t
{
    /** Slab descriptor for this slab. */
    slab_t *slab;
    /** Free objects chain. */
    slab_object_t *free;
    /** Number of free objects in this slab. */
    uint free_nb;
};
typedef struct slab_data_t slab_data_t;

/** Slab descriptor. */
struct slab_t
{
    /** Slab cache owning this slab. */
    slab_cache_t *slab_cache;
    /** Slab data. */
    slab_data_t *data;
    /** List node. */
    list_node_t node;
};

/**
 * Free a slab cache allocated object.
 * \param object referenced object
 */
static void
slab_free_ (void *object __FL);

void
slab_init (void)
{
    /* Nothing. */
}

void
slab_cache_init (slab_cache_t *cache, const char *name, uint object_size,
                 slab_object_destructor_t object_destructor)
{
    dbg_assert (cache);
    dbg_assert (name);
    dbg_assert (object_size > 0
                && object_size < BLK_SIZE
                - SLAB_ALIGN (SLAB_DATA_HEADER_SIZE + SLAB_REFCNT_SIZE));
    /* Initialise cache. */
    cache->name = name;
    cache->object_size = object_size;
    list_init (&cache->partial);
    cache->full_nb = 0;
    cache->object_destructor = object_destructor;
    /* Compute offsets. */
    cache->object_offset_first =
        SLAB_ALIGN (SLAB_DATA_HEADER_SIZE + SLAB_REFCNT_SIZE);
    cache->object_offset = SLAB_ALIGN (object_size + SLAB_REFCNT_SIZE);
    cache->object_per_slab = (BLK_SIZE - cache->object_offset_first)
        / cache->object_offset;
}

void
slab_cache_uninit (slab_cache_t *cache)
{
    dbg_assert (cache);
    dbg_assert (list_empty (&cache->partial));
    dbg_assert (cache->full_nb == 0);
    /* Nothing. */
}

/**
 * Allocate a new slab.
 * \param cache cache structure
 * \return the new slab
 */
static slab_t *
slab_grow (slab_cache_t *cache)
{
    uint i;
    dbg_assert (cache);
    dbg_assert (list_empty (&cache->partial));
    /* Allocate a new slab. */
    slab_t *s = (slab_t *) blk_alloc_desc ();
    s->slab_cache = cache;
    list_init_node (&s->node);
    /* Prepare it for allocation. */
    slab_data_t *d = s->data;
    d->slab = s;
    d->free_nb = cache->object_per_slab;
    uint off = (uint) d + cache->object_offset_first;
    d->free = (slab_object_t *) off;
    slab_object_t *o = d->free;
    for (i = cache->object_per_slab - 1; i; i--)
    {
        off += cache->object_offset;
        o->next = (slab_object_t *) off;
        o = o->next;
    }
    o->next = NULL;
    /* Add to partial. */
    list_push_back (&cache->partial, &s->node);
    /* Done. */
    return s;
}

/**
 * Release a slab from the slab cache.
 * \param cache cache structure
 * \param slab slab to release
 */
static void
slab_shrink (slab_cache_t *cache, slab_t *slab)
{
    dbg_assert (cache);
    dbg_assert (slab);
    /* Remove from partial unless full (this happens when only one object
     * fits per slab). */
    if (slab->data->free_nb)
        list_remove (&cache->partial, &slab->node);
    else
        cache->full_nb--;
    /* Release. */
    blk_release_desc ((blk_t *) slab);
}

void *
slab_alloc_ (slab_cache_t *cache __FL)
{
    slab_t *s;
    dbg_blame (cache);
    /* Lock. */
    arch_dsr_lock ();
    /* Allocate a new slab if no free object found. */
    if (list_empty (&cache->partial))
        s = slab_grow (cache);
    else
        s = PARENT_OF (slab_t, node, list_begin (&cache->partial));
    /* Take an object out. */
    dbg_assert (s->data->free_nb);
    s->data->free_nb--;
    slab_object_t *new = s->data->free;
    s->data->free = new->next;
    /* Prepare object. */
    *REFCNT (new) = 1;
#if CONFIG_SLAB_ALLOC_SCRAMBLE
    memset (new, 0x42, cache->object_size);
#endif
    /* Unlist if slab is full. */
    if (s->data->free_nb == 0)
    {
        list_remove (&cache->partial, &s->node);
        cache->full_nb++;
    }
    /* Unlock. */
    arch_dsr_unlock ();
    /* Book keeping. */
    restrack_create (NULL, new, _fl_, 1);
    /* Done. */
    return new;
}

void
slab_addref_ (void *object __FL)
{
    dbg_blame_ptr (object);
    /* Update reference counter. */
    arch_atomic_add_lock (REFCNT (object), 1);
    /* Book keeping. */
    restrack_update (NULL, object, _fl_, 1);
}

void
slab_release_ (void *object __FL)
{
    dbg_blame_ptr (object);
    dbg_blame (*REFCNT (object) != 0);
    /* Book keeping. */
    restrack_update (NULL, object, _fl_, -1);
    /* Update reference counter. */
    if (arch_atomic_add_lock (REFCNT (object), -1) == 0)
    {
        /* Lock. */
        arch_dsr_lock ();
        /* Free memory if no references left. */
        slab_free_ (object __fl);
        /* Unlock. */
        arch_dsr_unlock ();
    }
}

static void
slab_free_ (void *object __FL)
{
    dbg_assert_ptr (object);
    /* Find slab structures. */
    slab_data_t *d = (slab_data_t *) ((uint) object & ~(BLK_SIZE - 1));
    slab_t *s = d->slab;
    dbg_assert (s->data == d);
    slab_cache_t *cache = s->slab_cache;
    /* Call destructor. */
    if (cache->object_destructor)
        cache->object_destructor (object);
    /* Book keeping. */
    restrack_destroy (NULL, object, _fl_, 0);
    /* May release the slab. */
    if (d->free_nb == cache->object_per_slab - 1)
        slab_shrink (cache, s);
    else
    {
        /* Remove the slab from the partial list or full accounting. */
        if (d->free_nb)
            list_remove (&cache->partial, &s->node);
        else
            cache->full_nb--;
        /* Push the object in. */
        d->free_nb++;
        ((slab_object_t *) object)->next = d->free;
        d->free = object;
        /* Add to partial tail. */
        list_push_back (&cache->partial, &s->node);
    }
}
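
/*
 * Usage sketch (comment only, not compiled): a minimal illustration of how
 * a slab cache might be driven by client code.  The type my_object_t, the
 * cache my_cache and the slab_alloc/slab_addref/slab_release wrappers
 * (assumed to be provided by lib/slab.h around the _-suffixed functions
 * above) are illustrative assumptions, not part of this file.
 *
 *   static slab_cache_t my_cache;
 *
 *   void
 *   example (void)
 *   {
 *       // Create a cache of fixed-size objects, without destructor.
 *       slab_cache_init (&my_cache, "my_object", sizeof (my_object_t), NULL);
 *       // Allocate an object; its reference counter starts at one.
 *       my_object_t *o = slab_alloc (&my_cache);
 *       // Take an extra reference, then drop both; the object is freed
 *       // once the counter reaches zero, and its slab may be released.
 *       slab_addref (o);
 *       slab_release (o);
 *       slab_release (o);
 *       // Uninitialise once every object has been released.
 *       slab_cache_uninit (&my_cache);
 *   }
 */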