summaryrefslogtreecommitdiff
path: root/cesar/ecos/packages/kernel/current/src/sched
diff options
context:
space:
mode:
authorsave2008-04-07 14:17:42 +0000
committersave2008-04-07 14:17:42 +0000
commit3d58a62727346b7ac1a6cb36fed1a06ed72228dd (patch)
treed7788c3cf9f76426aef0286d0202e2097f0fa0eb /cesar/ecos/packages/kernel/current/src/sched
parent095dca4b0a8d4924093bab424f71f588fdd84613 (diff)
Moved the complete svn base into the cesar directory.
git-svn-id: svn+ssh://pessac/svn/cesar/trunk@1769 017c9cb6-072f-447c-8318-d5b54f68fe89
Diffstat (limited to 'cesar/ecos/packages/kernel/current/src/sched')
-rw-r--r--cesar/ecos/packages/kernel/current/src/sched/bitmap.cxx323
-rw-r--r--cesar/ecos/packages/kernel/current/src/sched/lottery.cxx456
-rw-r--r--cesar/ecos/packages/kernel/current/src/sched/mlqueue.cxx885
-rw-r--r--cesar/ecos/packages/kernel/current/src/sched/sched.cxx741
4 files changed, 2405 insertions, 0 deletions
diff --git a/cesar/ecos/packages/kernel/current/src/sched/bitmap.cxx b/cesar/ecos/packages/kernel/current/src/sched/bitmap.cxx
new file mode 100644
index 0000000000..5800e50ded
--- /dev/null
+++ b/cesar/ecos/packages/kernel/current/src/sched/bitmap.cxx
@@ -0,0 +1,323 @@
+//==========================================================================
+//
+// sched/bitmap.cxx
+//
+// Bitmap scheduler class implementation
+//
+//==========================================================================
+//####ECOSGPLCOPYRIGHTBEGIN####
+// -------------------------------------------
+// This file is part of eCos, the Embedded Configurable Operating System.
+// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
+//
+// eCos is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 2 or (at your option) any later version.
+//
+// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with eCos; if not, write to the Free Software Foundation, Inc.,
+// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+//
+// As a special exception, if other files instantiate templates or use macros
+// or inline functions from this file, or you compile this file and link it
+// with other works to produce a work based on this file, this file does not
+// by itself cause the resulting work to be covered by the GNU General Public
+// License. However the source code for this file must still be made available
+// in accordance with section (3) of the GNU General Public License.
+//
+// This exception does not invalidate any other reasons why a work based on
+// this file might be covered by the GNU General Public License.
+//
+// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
+// at http://sources.redhat.com/ecos/ecos-license/
+// -------------------------------------------
+//####ECOSGPLCOPYRIGHTEND####
+//==========================================================================
+//#####DESCRIPTIONBEGIN####
+//
+// Author(s): nickg
+// Contributors: nickg
+// Date: 1997-09-16
+// Purpose: Bitmap scheduler class implementation
+// Description: This file contains the implementations of
+// Cyg_Scheduler_Implementation and Cyg_SchedThread_Implementation.
+//
+//
+//####DESCRIPTIONEND####
+//
+//==========================================================================
+
+#include <pkgconf/kernel.h>
+
+#include <cyg/kernel/ktypes.h> // base kernel types
+#include <cyg/infra/cyg_trac.h> // tracing macros
+#include <cyg/infra/cyg_ass.h> // assertion macros
+
+#include <cyg/kernel/sched.hxx> // our header
+
+#include <cyg/hal/hal_arch.h> // Architecture specific definitions
+
+#include <cyg/kernel/thread.inl> // thread inlines
+#include <cyg/kernel/sched.inl> // scheduler inlines
+
+#ifdef CYGSEM_KERNEL_SCHED_BITMAP
+
+//==========================================================================
+// Cyg_Scheduler_Implementation class members
+
+// -------------------------------------------------------------------------
+// Constructor.
+
+// Constructor: deliberately does no member initialization. The run_queue
+// bitmap relies on static zero-initialization instead of assignment here,
+// because C++ gives no ordering guarantee between this static constructor
+// and thread constructors that may already have registered themselves.
+Cyg_Scheduler_Implementation::Cyg_Scheduler_Implementation()
+{
+ CYG_REPORT_FUNCTION();
+
+ // At present we cannot init run_queue here because the absence of
+ // ordering of static constructors means that we could do this
+ // after the static idle thread has been created. (Guess how I
+ // found this out!)
+// run_queue = 0;
+
+}
+
+// -------------------------------------------------------------------------
+// Choose the best thread to run next
+
+// Pick the next thread to run: the least-significant set bit of the
+// run_queue bitmap is the numerically smallest runnable priority, and in
+// this scheduler smaller numbers mean higher priority (see add_thread's
+// reschedule test). Each priority maps to exactly one thread via
+// thread_table, so the lookup is O(1).
+Cyg_Thread *Cyg_Scheduler_Implementation::schedule()
+{
+ CYG_REPORT_FUNCTION();
+
+ // The run queue may _never_ be empty, there is always
+ // an idle thread at the lowest priority.
+
+ CYG_ASSERT(run_queue != 0, "Run queue empty");
+
+ cyg_uint32 index;
+
+ // HAL_LSBIT_INDEX yields the bit number of the lowest set bit.
+ HAL_LSBIT_INDEX(index, run_queue);
+
+ return thread_table[index];
+}
+
+// -------------------------------------------------------------------------
+
+// Make a thread runnable by setting its priority bit in the run_queue
+// bitmap. In the bitmap scheduler each priority level holds at most one
+// thread (enforced by the asserts below).
+void Cyg_Scheduler_Implementation::add_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ // Note the inverted-looking comparisons: MIN_PRIORITY is the largest
+ // numeric value and MAX_PRIORITY the smallest.
+ CYG_ASSERT((CYG_THREAD_MIN_PRIORITY >= thread->priority)
+ && (CYG_THREAD_MAX_PRIORITY <= thread->priority),
+ "Priority out of range!");
+
+ CYG_ASSERT( thread_table[thread->priority] == NULL ||
+ thread_table[thread->priority] == thread,
+ "Duplicate thread priorities" );
+
+ CYG_ASSERT( (run_queue & (1<<thread->priority)) == 0,
+ "Run queue bit already set" );
+
+ // If the thread is on some other queue, remove it
+ // here.
+ if( thread->queue != NULL )
+ {
+ thread->queue->remove(thread);
+ thread->queue = NULL;
+ }
+
+ run_queue |= 1<<thread->priority;
+
+ // If the new thread is higher priority than the
+ // current thread, request a reschedule.
+
+ if( thread->priority < Cyg_Scheduler::get_current_thread()->priority )
+ set_need_reschedule();
+}
+
+// -------------------------------------------------------------------------
+
+// Remove a thread from the runnable set by clearing its priority bit.
+// If the thread being removed is the one currently executing, a
+// reschedule is requested so another thread can be selected.
+void Cyg_Scheduler_Implementation::rem_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ CYG_ASSERT( thread_table[thread->priority] == thread,
+ "Invalid thread priority" );
+
+ CYG_ASSERT( (run_queue & (1<<thread->priority)) != 0,
+ "Run queue bit not set" );
+
+ run_queue &= ~(1<<thread->priority);
+
+ if( thread == Cyg_Scheduler::get_current_thread() )
+ set_need_reschedule();
+}
+
+// -------------------------------------------------------------------------
+// Set up initial idle thread
+
+// Install the idle thread for the given CPU during startup: record it as
+// the current thread, then resume() it so it enters the run queue and is
+// eligible to execute.
+void Cyg_Scheduler_Implementation::set_idle_thread( Cyg_Thread *thread, HAL_SMP_CPU_TYPE cpu )
+{
+ CYG_REPORT_FUNCTION();
+
+ // Make the thread the current thread for this CPU.
+
+ current_thread[cpu] = thread;
+
+ // This will insert the thread in the run queues and make it
+ // available to execute.
+ thread->resume();
+}
+
+// -------------------------------------------------------------------------
+// register thread with scheduler
+
+// Record the thread in the per-priority lookup table used by schedule()
+// and dequeue(). One slot per priority: registration claims the slot.
+void Cyg_Scheduler_Implementation::register_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ thread_table[thread->priority] = thread;
+}
+
+// -------------------------------------------------------------------------
+
+// deregister thread
+// Release the thread's priority slot in the lookup table, making that
+// priority available again (see unique()).
+void Cyg_Scheduler_Implementation::deregister_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ thread_table[thread->priority] = NULL;
+}
+
+// -------------------------------------------------------------------------
+// Test the given priority for uniqueness
+
+// Return true if no registered thread currently occupies the given
+// priority slot — i.e. the priority is free for a new thread to claim.
+cyg_bool Cyg_Scheduler_Implementation::unique( cyg_priority priority)
+{
+ CYG_REPORT_FUNCTION();
+
+ return thread_table[priority] == NULL;
+}
+
+
+//==========================================================================
+// Cyg_Cyg_SchedThread_Implementation class members
+
+// Per-thread scheduler state constructor. sched_info carries the
+// requested priority; if that slot is already taken the priority is
+// bumped (numerically upward, i.e. to lower effective priority) until a
+// free slot is found, preserving the one-thread-per-priority invariant.
+Cyg_SchedThread_Implementation::Cyg_SchedThread_Implementation
+(
+ CYG_ADDRWORD sched_info
+)
+{
+ CYG_REPORT_FUNCTION();
+
+#if 1
+ // Assign this thread's priority to the supplied sched_info
+ // or the next highest priority available.
+
+ priority = cyg_priority(sched_info);
+
+ while( !Cyg_Scheduler::scheduler.unique(priority) )
+ priority++;
+
+#else
+ // Assign initial priorities to threads in descending order of
+ // creation.
+
+ static cyg_priority init_priority = 0;
+
+ priority = init_priority++;
+#endif
+
+}
+
+// -------------------------------------------------------------------------
+
+// Intentional no-op: with one thread per priority there is never another
+// thread at the same level to yield to.
+void Cyg_SchedThread_Implementation::yield()
+{
+ CYG_REPORT_FUNCTION();
+
+ // We cannot yield in this scheduler
+}
+
+//==========================================================================
+// Cyg_ThreadQueue_Implementation class members
+
+// Wait queues in the bitmap scheduler are themselves just priority
+// bitmaps; an empty queue is the zero bitmap.
+Cyg_ThreadQueue_Implementation::Cyg_ThreadQueue_Implementation()
+{
+ CYG_REPORT_FUNCTION();
+
+ wait_queue = 0; // empty queue
+
+ CYG_REPORT_RETURN();
+}
+
+
+// Add a thread to this wait queue by setting its priority bit, and point
+// the thread back at the owning Cyg_ThreadQueue (recovered from this
+// implementation base via CYG_CLASSFROMBASE).
+void Cyg_ThreadQueue_Implementation::enqueue(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ wait_queue |= 1<<thread->priority;
+ thread->queue = CYG_CLASSFROMBASE(Cyg_ThreadQueue,
+ Cyg_ThreadQueue_Implementation,
+ this);
+}
+
+// -------------------------------------------------------------------------
+
+// Remove and return the highest priority waiting thread (lowest set bit
+// of the bitmap), or NULL if the queue is empty.
+Cyg_Thread *Cyg_ThreadQueue_Implementation::dequeue()
+{
+ CYG_REPORT_FUNCTION();
+
+ // Isolate ls bit in run_queue.
+ // (x & -x is the classic two's-complement lowest-set-bit trick.)
+ cyg_sched_bitmap next_thread = wait_queue & -wait_queue;
+
+ if( next_thread == 0 ) return NULL;
+
+ // Clear that thread's bit from the queue.
+ wait_queue &= ~next_thread;
+
+ cyg_uint32 index;
+
+ HAL_LSBIT_INDEX(index, next_thread);
+
+ // Map the priority index back to its (unique) thread.
+ Cyg_Thread *thread = Cyg_Scheduler::scheduler.thread_table[index];
+
+ thread->queue = NULL;
+
+ return thread;
+}
+
+// -------------------------------------------------------------------------
+
+// Return (without removing) the highest priority waiting thread, or NULL
+// if the queue is empty. Same bit-isolation logic as dequeue(), but the
+// wait_queue bitmap is left untouched.
+Cyg_Thread *Cyg_ThreadQueue_Implementation::highpri()
+{
+ CYG_REPORT_FUNCTION();
+
+ // Isolate ls bit in run_queue.
+ cyg_sched_bitmap next_thread = wait_queue & -wait_queue;
+
+ if( next_thread == 0 ) return NULL;
+
+ cyg_uint32 index;
+
+ HAL_LSBIT_INDEX(index, next_thread);
+
+ return Cyg_Scheduler::scheduler.thread_table[index];
+}
+
+// -------------------------------------------------------------------------
+
+// Remove a specific thread from this wait queue: clear its priority bit
+// and detach its back-pointer to the queue.
+void Cyg_ThreadQueue_Implementation::remove(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ wait_queue &= ~(1<<thread->priority);
+ thread->queue = NULL;
+}
+
+#endif
+
+// -------------------------------------------------------------------------
+// EOF sched/bitmap.cxx
diff --git a/cesar/ecos/packages/kernel/current/src/sched/lottery.cxx b/cesar/ecos/packages/kernel/current/src/sched/lottery.cxx
new file mode 100644
index 0000000000..128f189e47
--- /dev/null
+++ b/cesar/ecos/packages/kernel/current/src/sched/lottery.cxx
@@ -0,0 +1,456 @@
+//==========================================================================
+//
+// sched/lottery.cxx
+//
+// Lottery scheduler class implementation
+//
+//==========================================================================
+//####ECOSGPLCOPYRIGHTBEGIN####
+// -------------------------------------------
+// This file is part of eCos, the Embedded Configurable Operating System.
+// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
+//
+// eCos is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 2 or (at your option) any later version.
+//
+// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with eCos; if not, write to the Free Software Foundation, Inc.,
+// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+//
+// As a special exception, if other files instantiate templates or use macros
+// or inline functions from this file, or you compile this file and link it
+// with other works to produce a work based on this file, this file does not
+// by itself cause the resulting work to be covered by the GNU General Public
+// License. However the source code for this file must still be made available
+// in accordance with section (3) of the GNU General Public License.
+//
+// This exception does not invalidate any other reasons why a work based on
+// this file might be covered by the GNU General Public License.
+//
+// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
+// at http://sources.redhat.com/ecos/ecos-license/
+// -------------------------------------------
+//####ECOSGPLCOPYRIGHTEND####
+//==========================================================================
+//#####DESCRIPTIONBEGIN####
+//
+// Author(s): nickg
+// Contributors: nickg
+// Date: 1997-09-16
+// Purpose: Lottery scheduler class implementation
+// Description: This file contains the implementations of
+// Cyg_Scheduler_Implementation and
+// Cyg_SchedThread_Implementation.
+//
+//
+//####DESCRIPTIONEND####
+//
+//==========================================================================
+
+#include <pkgconf/kernel.h>
+
+#include <cyg/kernel/ktypes.h> // base kernel types
+#include <cyg/infra/cyg_trac.h> // tracing macros
+#include <cyg/infra/cyg_ass.h> // assertion macros
+
+#include <cyg/kernel/sched.hxx> // our header
+#include <cyg/kernel/intr.hxx> // interrupt defines, for Cyg_HAL_Clock
+
+#include <cyg/hal/hal_arch.h> // Architecture specific definitions
+
+
+#include <cyg/kernel/thread.inl> // thread inlines
+#include <cyg/kernel/sched.inl> // scheduler inlines
+
+#ifdef CYGSEM_KERNEL_SCHED_LOTTERY
+
+#define CYG_ENABLE_TRACE 1
+
+//==========================================================================
+// Cyg_Scheduler_Implementation class static members
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+
+cyg_count32 Cyg_Scheduler_Implementation::timeslice_count =
+ CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS;
+
+#endif
+
+//==========================================================================
+// Cyg_Scheduler_Implementation class members
+
+// -------------------------------------------------------------------------
+// Constructor.
+
+// Constructor: start with an empty lottery (no tickets outstanding) and
+// a fixed non-zero RNG seed so draws are deterministic from boot.
+Cyg_Scheduler_Implementation::Cyg_Scheduler_Implementation()
+{
+ CYG_REPORT_FUNCTION();
+
+ total_tickets = 0;
+ rand_seed = 1;
+}
+
+// -------------------------------------------------------------------------
+// Choose the best thread to run next
+
+// Run one lottery: draw a pseudo-random ticket in [0, total_tickets) and
+// walk the run queue summing each thread's ticket count (stored in its
+// `priority` field) until the draw falls within a thread's allocation.
+// Threads holding more tickets therefore win proportionally more often.
+Cyg_Thread *Cyg_Scheduler_Implementation::schedule()
+{
+ CYG_REPORT_FUNCTION();
+
+#ifdef CYGPKG_HAL_POWERPC
+
+ // PowerPc specific version of random number generator.
+ // NOTE(review): appears to be a Lehmer-style multiplicative generator
+ // with multiplier 33614 (== 2*16807, twice the classic Park-Miller
+ // constant) — confirm against the generic variant below before
+ // touching the constants.
+ register cyg_int32 r1 asm("r4");
+ r1 = rand_seed;
+ asm(
+ "li 7,0;"
+ "ori 7,7,33614;"
+ "mulhwu 5,7,%0;"
+ "mullw 6,7,%0;"
+ "srawi 6,6,1;"
+ "add %0,5,6;"
+ "cmpwi %0,0;"
+ "bge 1f;"
+ "slwi %0,%0,1;"
+ "srwi %0,%0,1;"
+ "addi %0,%0,1;"
+ "1:;"
+ : "=r"(r1)
+ : "0"(r1)
+ : "r5", "r6", "r7"
+ );
+ rand_seed = r1;
+
+#else
+#if 1
+ // Simple linear congruential generator; mask keeps r1 non-negative
+ // so the modulus below yields a valid ticket number.
+ rand_seed = (rand_seed * 1103515245) + 1234;
+ cyg_int32 r1 = rand_seed & 0x7FFFFFFF;
+#else
+ // Generic implementation of RNG.
+ // (Disabled 64-bit variant of the multiplicative generator above.)
+#if( CYG_BYTEORDER == CYG_MSBFIRST )
+#define _LO 1
+#define _HI 0
+#else
+#define _LO 0
+#define _HI 1
+#endif
+ union { cyg_int64 r64; cyg_int32 r32[2]; } u;
+ u.r64 = (cyg_int64)rand_seed * 33614LL;
+ cyg_int32 r1 = u.r32[_HI] + (u.r32[_LO]>>1);
+ if( r1 < 0 )
+ r1 = (r1 & 0x7FFFFFFF) + 1;
+ rand_seed = r1;
+#undef _LO
+#undef _HI
+#endif
+#endif
+
+ cyg_int32 ticket = r1 % total_tickets;
+ cyg_int32 tick = ticket;
+ Cyg_Thread *thread = run_queue.highpri();
+
+ // Search the run queue for the thread with the
+ // given ticket.
+ while( ticket > 0 )
+ {
+ // Each thread "covers" priority-many tickets of the range.
+ ticket -= thread->priority;
+ if( ticket <= 0 ) break;
+ thread = thread->next;
+
+ CYG_ASSERT( thread != run_queue.highpri(), "Looping in scheduler");
+ }
+
+ CYG_TRACE3( CYG_ENABLE_TRACE,
+ "seed %08x ticket %d thread %08x",
+ rand_seed, tick, thread);
+
+ // If the thread has any compensation tickets, take them away since
+ // it has just won.
+
+ if( thread->compensation_tickets > 0 )
+ {
+ thread->priority -= thread->compensation_tickets;
+ total_tickets -= thread->compensation_tickets;
+ thread->compensation_tickets = 0;
+ }
+
+ // Re-insert thread at head of list. This reduces runtime by
+ // putting the large ticket holders at the front of the list.
+
+// run_queue.remove(thread);
+// run_queue.enqueue(thread);
+
+ CYG_CHECK_DATA_PTR( thread, "Invalid next thread pointer");
+ CYG_ASSERTCLASS( thread, "Bad next thread" );
+
+ return thread;
+}
+
+// -------------------------------------------------------------------------
+
+// Make a thread runnable: detach it from any wait queue, add its tickets
+// to the lottery pool, and put it on the circular run queue.
+void Cyg_Scheduler_Implementation::add_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ // If the thread is on some other queue, remove it
+ // here.
+ if( thread->queue != NULL )
+ {
+ thread->queue->remove(thread);
+ thread->queue = NULL;
+ }
+
+ // The thread's priority field holds its ticket count.
+ total_tickets += thread->priority;
+
+ run_queue.enqueue(thread);
+}
+
+// -------------------------------------------------------------------------
+
+// Take a thread out of the lottery: remove it from the run queue,
+// subtract its tickets, then grant compensation tickets proportional to
+// the unused part of its timeslice so a thread that blocks early is more
+// likely to win once it becomes runnable again.
+void Cyg_Scheduler_Implementation::rem_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ run_queue.remove(thread);
+
+ total_tickets -= thread->priority;
+
+ // Compensate the thread for the segment of the quantum that
+ // it used. This makes it more likely to win the lottery next time
+ // it is scheduled. We only do this for threads that have voluntarily
+ // given up the CPU.
+
+// if( thread->get_state() != Cyg_Thread::RUNNING )
+ {
+#if 0
+ cyg_uint32 hal_ticks;
+ HAL_CLOCK_READ( &hal_ticks );
+ thread->compensation_tickets = thread->priority *
+ CYGNUM_KERNEL_COUNTERS_RTC_PERIOD / hal_ticks;
+#else
+ // NOTE(review): compensation scales tickets by the full timeslice
+ // over the remaining count; assumes timeslice_count > 0 at this
+ // point — confirm this cannot divide by zero after timeslice().
+ thread->compensation_tickets = (thread->priority *
+ CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS) / timeslice_count;
+
+#endif
+ // Inflate the thread's holding; removed again in schedule() when
+ // the thread next wins.
+ thread->priority += thread->compensation_tickets;
+ }
+}
+
+// -------------------------------------------------------------------------
+// register thread with scheduler
+
+// No-op: the lottery scheduler keeps no per-priority table, so threads
+// need no registration step.
+void Cyg_Scheduler_Implementation::register_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ // No registration necessary in this scheduler
+}
+
+// -------------------------------------------------------------------------
+
+// deregister thread
+// No-op counterpart of register_thread(): nothing to undo.
+void Cyg_Scheduler_Implementation::deregister_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ // No registration necessary in this scheduler
+}
+
+// -------------------------------------------------------------------------
+// Test the given priority for uniqueness
+
+// Ticket counts (stored as "priority") may be shared freely between
+// threads, so every priority is reported as available.
+cyg_bool Cyg_Scheduler_Implementation::unique( cyg_priority priority)
+{
+ CYG_REPORT_FUNCTION();
+
+ // Priorities are not unique
+ return true;
+}
+
+//==========================================================================
+// Support for timeslicing option
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+
+// Clock-tick hook: count down the current quantum and, when it expires,
+// request a reschedule (a fresh lottery draw) and reset the counter.
+void Cyg_Scheduler_Implementation::timeslice()
+{
+ CYG_REPORT_FUNCTION();
+
+ if( --timeslice_count <= 0 )
+ {
+ CYG_INSTRUMENT_SCHED(TIMESLICE,0,0);
+
+ // Force a reschedule on each timeslice
+ need_reschedule = true;
+ timeslice_count = CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS;
+ }
+}
+
+#endif
+
+//==========================================================================
+// Cyg_Cyg_SchedThread_Implementation class members
+
+// Per-thread scheduler state constructor: sched_info is the thread's
+// ticket count, and the thread starts as a singleton circular list
+// (next/prev pointing at itself) ready for insertion into a queue.
+Cyg_SchedThread_Implementation::Cyg_SchedThread_Implementation
+(
+ CYG_ADDRWORD sched_info
+)
+{
+ CYG_REPORT_FUNCTION();
+
+ priority = cyg_priority(sched_info);
+
+ // point the next and prev field at this thread.
+
+ next = prev = CYG_CLASSFROMBASE(Cyg_Thread,
+ Cyg_SchedThread_Implementation,
+ this);
+}
+
+// -------------------------------------------------------------------------
+// Insert thread in front of this
+
+// Splice `thread` into the circular doubly-linked list immediately
+// before this thread (i.e. at this thread's `prev` position).
+void Cyg_SchedThread_Implementation::insert( Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ thread->next = CYG_CLASSFROMBASE(Cyg_Thread,
+ Cyg_SchedThread_Implementation,
+ this);
+ thread->prev = prev;
+ prev->next = thread;
+ prev = thread;
+}
+
+// -------------------------------------------------------------------------
+// remove this from queue
+
+// Unlink this thread from its circular list and restore it to the
+// singleton state (next/prev pointing back at itself).
+void Cyg_SchedThread_Implementation::remove()
+{
+ CYG_REPORT_FUNCTION();
+
+ next->prev = prev;
+ prev->next = next;
+ next = prev = CYG_CLASSFROMBASE(Cyg_Thread,
+ Cyg_SchedThread_Implementation,
+ this);
+}
+
+// -------------------------------------------------------------------------
+// Yield the processor to another thread
+
+// Intentional no-op: in a lottery scheduler the next runner is always
+// chosen by random draw, so an explicit yield has no defined meaning.
+void Cyg_SchedThread_Implementation::yield()
+{
+ CYG_REPORT_FUNCTION();
+
+
+}
+
+//==========================================================================
+// Cyg_ThreadQueue_Implementation class members
+
+// Add a thread to this queue. The first arrival becomes the head;
+// later arrivals are spliced in just before the head (insert() links
+// before `this`), and the thread records which queue now owns it.
+void Cyg_ThreadQueue_Implementation::enqueue(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ // Always put thread at head of queue
+ if( queue == NULL ) queue = thread;
+ else
+ {
+ queue->insert(thread);
+// queue->next->insert(thread);
+// queue = thread;
+ }
+
+ thread->queue = CYG_CLASSFROMBASE(Cyg_ThreadQueue,
+ Cyg_ThreadQueue_Implementation,
+ this);
+}
+
+// -------------------------------------------------------------------------
+
+// Remove and return the thread at the head of this queue, or NULL if
+// empty. Handles the single-element circular-list case specially, since
+// removing the sole member must empty the queue pointer itself.
+Cyg_Thread *Cyg_ThreadQueue_Implementation::dequeue()
+{
+ CYG_REPORT_FUNCTION();
+
+ if( queue == NULL ) return NULL;
+
+ Cyg_Thread *thread = queue;
+
+ if( thread->next == thread )
+ {
+ // sole thread on list, NULL out ptr
+ queue = NULL;
+ }
+ else
+ {
+ // advance to next and remove thread
+ queue = thread->next;
+ thread->remove();
+ }
+
+ // The thread no longer belongs to any queue.
+ thread->queue = NULL;
+
+ return thread;
+}
+
+// -------------------------------------------------------------------------
+
+// Return the head of the queue without removing it (NULL when empty).
+// With no priority ordering in this scheduler, the head stands in for
+// the "highest priority" member.
+Cyg_Thread *Cyg_ThreadQueue_Implementation::highpri()
+{
+ CYG_REPORT_FUNCTION();
+
+ return queue;
+}
+
+// -------------------------------------------------------------------------
+
+// Remove a specific thread from anywhere in this queue, fixing up the
+// head pointer when the thread being removed is the current head.
+void Cyg_ThreadQueue_Implementation::remove(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+
+ // If the thread we want is the at the head
+ // of the list, and is on its own, clear the
+ // list and return. Otherwise advance to the
+ // next thread and remove ours. If the thread
+ // is not at the head of the list, just dequeue
+ // it.
+
+ thread->queue = NULL;
+
+ if( queue == thread )
+ {
+ if( thread->next == thread )
+ {
+ queue = NULL;
+ return;
+ }
+ else queue = thread->next;
+ }
+
+ // Qualified call: unlink from the circular list (base-class remove),
+ // not the queue-level remove we are in.
+ thread->Cyg_SchedThread_Implementation::remove();
+
+}
+
+// -------------------------------------------------------------------------
+// Rotate the front thread on the queue to the back.
+
+// Rotate the front thread on the queue to the back by advancing the head
+// pointer one place around the circular list.
+// NOTE(review): assumes the queue is non-empty — a NULL `queue` here
+// would dereference NULL; confirm callers guarantee this.
+void Cyg_ThreadQueue_Implementation::rotate()
+{
+ CYG_REPORT_FUNCTION();
+
+ queue = queue->next;
+}
+
+// -------------------------------------------------------------------------
+
+#endif
+
+// -------------------------------------------------------------------------
+// EOF sched/lottery.cxx
diff --git a/cesar/ecos/packages/kernel/current/src/sched/mlqueue.cxx b/cesar/ecos/packages/kernel/current/src/sched/mlqueue.cxx
new file mode 100644
index 0000000000..8acadf76a3
--- /dev/null
+++ b/cesar/ecos/packages/kernel/current/src/sched/mlqueue.cxx
@@ -0,0 +1,885 @@
+//==========================================================================
+//
+// sched/mlqueue.cxx
+//
+// Multi-level queue scheduler class implementation
+//
+//==========================================================================
+//####ECOSGPLCOPYRIGHTBEGIN####
+// -------------------------------------------
+// This file is part of eCos, the Embedded Configurable Operating System.
+// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
+//
+// eCos is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 2 or (at your option) any later version.
+//
+// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with eCos; if not, write to the Free Software Foundation, Inc.,
+// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+//
+// As a special exception, if other files instantiate templates or use macros
+// or inline functions from this file, or you compile this file and link it
+// with other works to produce a work based on this file, this file does not
+// by itself cause the resulting work to be covered by the GNU General Public
+// License. However the source code for this file must still be made available
+// in accordance with section (3) of the GNU General Public License.
+//
+// This exception does not invalidate any other reasons why a work based on
+// this file might be covered by the GNU General Public License.
+//
+// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
+// at http://sources.redhat.com/ecos/ecos-license/
+// -------------------------------------------
+//####ECOSGPLCOPYRIGHTEND####
+//==========================================================================
+//#####DESCRIPTIONBEGIN####
+//
+// Author(s): nickg
+// Contributors: jlarmour
+// Date: 1999-02-17
+// Purpose: Multilevel queue scheduler class implementation
+// Description: This file contains the implementations of
+// Cyg_Scheduler_Implementation and
+// Cyg_SchedThread_Implementation.
+//
+//
+//####DESCRIPTIONEND####
+//
+//==========================================================================
+
+#include <pkgconf/kernel.h>
+
+#include <cyg/kernel/ktypes.h> // base kernel types
+#include <cyg/infra/cyg_trac.h> // tracing macros
+#include <cyg/infra/cyg_ass.h> // assertion macros
+
+#include <cyg/kernel/sched.hxx> // our header
+
+#include <cyg/hal/hal_arch.h> // Architecture specific definitions
+
+#include <cyg/kernel/thread.inl> // thread inlines
+#include <cyg/kernel/sched.inl> // scheduler inlines
+
+#ifdef CYGSEM_KERNEL_SCHED_MLQUEUE
+
+//==========================================================================
+// Cyg_Scheduler_Implementation class static members
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+
+cyg_ucount32 Cyg_Scheduler_Implementation::timeslice_count[CYGNUM_KERNEL_CPU_MAX];
+
+#endif
+
+
+//==========================================================================
+// Cyg_Scheduler_Implementation class members
+
+// -------------------------------------------------------------------------
+// Constructor.
+
+// Constructor: empty the per-priority queue bitmap, clear the SMP
+// pending bookkeeping, and prime every CPU with a full timeslice and a
+// forced initial reschedule.
+Cyg_Scheduler_Implementation::Cyg_Scheduler_Implementation()
+{
+ CYG_REPORT_FUNCTION();
+
+ queue_map = 0;
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+ // pending[] counts runnable-but-not-running threads per priority;
+ // pending_map mirrors which counts are non-zero.
+ pending_map = 0;
+
+ for( int i = 0; i < CYGNUM_KERNEL_SCHED_PRIORITIES; i++ )
+ pending[i] = 0;
+
+#endif
+
+ for( int i = 0; i < CYGNUM_KERNEL_CPU_MAX; i++ )
+ {
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+ timeslice_count[i] = CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS;
+#endif
+ need_reschedule[i] = true;
+ }
+
+ CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Choose the best thread to run next
+
+// Select the next thread to run. Uniprocessor: take the head of the
+// highest-priority (lowest numbered) non-empty queue. SMP: consult the
+// pending map instead, skipping threads already claimed by another CPU,
+// and transfer the CPU claim to the chosen thread.
+Cyg_Thread *
+Cyg_Scheduler_Implementation::schedule(void)
+{
+ CYG_REPORT_FUNCTYPE("returning thread %08x");
+
+ // The run queue may _never_ be empty, there is always
+ // an idle thread at the lowest priority.
+
+ CYG_ASSERT( queue_map != 0, "Run queue empty");
+ CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
+ CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+ Cyg_Thread *current = get_current_thread();
+ register cyg_uint32 index;
+
+ CYG_ASSERT( current->cpu != CYG_KERNEL_CPU_NONE, "Current thread does not have CPU set!");
+
+ // If the current thread is still runnable, return it to pending
+ // state so that it can be considered alongside any other threads
+ // for execution.
+ if( current->get_state() == Cyg_Thread::RUNNING )
+ {
+ current->cpu = CYG_KERNEL_CPU_NONE;
+ pending[current->priority]++;
+ pending_map |= (1<<current->priority);
+ }
+ else
+ {
+ // Otherwise, ensure that the thread is no longer marked as
+ // running.
+ current->cpu = CYG_KERNEL_CPU_NONE;
+ }
+
+
+ // Lowest set bit of the pending map = best runnable priority.
+ HAL_LSBIT_INDEX(index, pending_map);
+
+ Cyg_RunQueue *queue = &run_queue[index];
+
+ CYG_ASSERT( !queue->empty(), "Queue for index empty");
+ CYG_ASSERT( pending[index] > 0, "Pending array and map disagree");
+
+ Cyg_Thread *thread = queue->get_head();
+
+ // We know there is a runnable thread in this queue, If the thread
+ // we got is not it, scan until we find it. While not constant time,
+ // this search has an upper bound of the number of CPUs in the system.
+
+ while( thread->cpu != CYG_KERNEL_CPU_NONE )
+ thread = thread->get_next();
+
+ // Take newly scheduled thread out of pending map
+ thread->cpu = CYG_KERNEL_CPU_THIS();
+ if( --pending[index] == 0 )
+ pending_map &= ~(1<<index);
+
+#else
+
+ register cyg_uint32 index;
+
+ // Lowest set bit of the queue map = best runnable priority.
+ HAL_LSBIT_INDEX(index, queue_map);
+
+ Cyg_RunQueue *queue = &run_queue[index];
+
+ CYG_ASSERT( !queue->empty(), "Queue for index empty");
+
+ Cyg_Thread *thread = queue->get_head();
+
+#endif
+
+ CYG_INSTRUMENT_MLQ( SCHEDULE, thread, index);
+
+ CYG_ASSERT( thread != NULL , "No threads in run queue");
+ CYG_ASSERT( thread->queue == NULL , "Runnable thread on a queue!");
+
+ CYG_REPORT_RETVAL(thread);
+
+ return thread;
+}
+
+// -------------------------------------------------------------------------
+
+// Make a thread runnable: detach it from any wait queue, append it to
+// the FIFO run queue for its priority, keep the queue_map bit (and, on
+// SMP, the pending bookkeeping) in sync, and request a reschedule if it
+// beats a currently running thread.
+void
+Cyg_Scheduler_Implementation::add_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+ CYG_REPORT_FUNCARG1("thread=%08x", thread);
+
+ cyg_priority pri = thread->priority;
+ Cyg_RunQueue *queue = &run_queue[pri];
+
+ CYG_INSTRUMENT_MLQ( ADD, thread, pri);
+
+ // Inverted-looking comparisons: MIN_PRIORITY is the largest numeric
+ // value and MAX_PRIORITY the smallest.
+ CYG_ASSERT((CYG_THREAD_MIN_PRIORITY >= pri)
+ && (CYG_THREAD_MAX_PRIORITY <= pri),
+ "Priority out of range!");
+
+ CYG_ASSERT( ((queue_map & (1<<pri))!=0) == ((!run_queue[pri].empty())!=0), "Map and queue disagree");
+
+ // If the thread is on some other queue, remove it
+ // here.
+ if( thread->queue != NULL )
+ {
+ thread->queue->remove(thread);
+ }
+
+ if( queue->empty() )
+ {
+ // set the map bit and ask for a reschedule if this is a
+ // new highest priority thread.
+
+ queue_map |= (1<<pri);
+
+ }
+ // else the queue already has an occupant, queue behind him
+
+ queue->add_tail(thread);
+
+ // If the new thread is higher priority than any
+ // current thread, request a reschedule.
+
+ set_need_reschedule(thread);
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+ // If the thread is not currently running, increment the pending
+ // count for the priority, and if necessary set the bit in the
+ // pending map.
+
+ if( thread->cpu == CYG_KERNEL_CPU_NONE )
+ {
+ if( pending[pri]++ == 0 )
+ pending_map |= (1<<pri);
+ }
+ // Otherwise the pending count will be dealt with in schedule().
+
+#endif
+
+ // Post-conditions: map and queues consistent, idle thread intact.
+ CYG_ASSERT( thread->queue == NULL , "Runnable thread on a queue!");
+ CYG_ASSERT( queue_map != 0, "Run queue empty");
+ CYG_ASSERT( queue_map & (1<<pri), "Queue map bit not set for pri");
+ CYG_ASSERT( !run_queue[pri].empty(), "Queue for pri empty");
+ CYG_ASSERT( ((queue_map & (1<<pri))!=0) == ((!run_queue[pri].empty())!=0), "Map and queue disagree");
+ CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
+ CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
+
+ CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+
+// Remove a thread from the runnable set: take it off its priority's run
+// queue, clear the queue_map bit when the queue empties, and on SMP keep
+// the pending counters in step — interrupting another CPU if the thread
+// is currently running there.
+void
+Cyg_Scheduler_Implementation::rem_thread(Cyg_Thread *thread)
+{
+ CYG_REPORT_FUNCTION();
+ CYG_REPORT_FUNCARG1("thread=%08x", thread);
+
+ CYG_ASSERT( queue_map != 0, "Run queue empty");
+
+ cyg_priority pri = thread->priority;
+ Cyg_RunQueue *queue = &run_queue[pri];
+
+ CYG_INSTRUMENT_MLQ( REM, thread, pri);
+
+ CYG_ASSERT( pri != CYG_THREAD_MIN_PRIORITY, "Idle thread trying to sleep!");
+ CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+ if( thread->cpu == CYG_KERNEL_CPU_NONE )
+ {
+ // If the thread is not running, then we need to adjust the
+ // pending count array and map if necessary.
+
+ if( --pending[pri] == 0 )
+ pending_map &= ~(1<<pri);
+ }
+ else
+ {
+ // If the target thread is currently running on a different
+ // CPU, send a reschedule interrupt there to deschedule it.
+ if( thread->cpu != CYG_KERNEL_CPU_THIS() )
+ CYG_KERNEL_CPU_RESCHEDULE_INTERRUPT( thread->cpu, 0 );
+ }
+ // If the thread is current running on this CPU, then the pending
+ // count will be dealt with in schedule().
+
+#endif
+
+ CYG_ASSERT( queue_map & (1<<pri), "Queue map bit not set for pri");
+ CYG_ASSERT( !run_queue[pri].empty(), "Queue for pri empty");
+
+ // remove thread from queue
+ queue->remove(thread);
+
+ if( queue->empty() )
+ {
+ // If this was only thread in
+ // queue, clear map.
+
+ queue_map &= ~(1<<pri);
+ }
+
+ // Post-conditions: map and queues consistent, idle thread intact.
+ CYG_ASSERT( queue_map != 0, "Run queue empty");
+ CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
+ CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
+ CYG_ASSERT( ((queue_map & (1<<pri))!=0) == ((!run_queue[pri].empty())!=0), "Map and queue disagree");
+
+ CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Set the need_reschedule flag
+// This function overrides the definition in Cyg_Scheduler_Base and tests
+// for a reschedule condition based on the priorities of the given thread
+// and the current thread(s).
+
+void Cyg_Scheduler_Implementation::set_need_reschedule(Cyg_Thread *thread)
+{
+#ifndef CYGPKG_KERNEL_SMP_SUPPORT
+
+    // Single CPU: reschedule if the new thread preempts the current one
+    // (numerically lower priority value is higher priority), or if the
+    // current thread is no longer runnable.
+    if( current_thread[0]->priority > thread->priority ||
+        current_thread[0]->get_state() != Cyg_Thread::RUNNING )
+        need_reschedule[0] = true;
+
+#else
+
+    HAL_SMP_CPU_TYPE cpu_this = CYG_KERNEL_CPU_THIS();
+    HAL_SMP_CPU_TYPE cpu_count = CYG_KERNEL_CPU_COUNT();
+
+    // Start with current CPU. If we can do the job locally then
+    // that is most efficient. Only go on to other CPUs if that is
+    // not possible.
+
+    for(int i = 0; i < cpu_count; i++)
+    {
+        HAL_SMP_CPU_TYPE cpu =
+            HAL_SMP_CPU_COUNT2IDX ( (i + HAL_SMP_CPU_IDX2COUNT( cpu_this ) ) % cpu_count );
+
+        // If a CPU is not already marked for rescheduling, and its
+        // current thread is of lower priority than _thread_, then
+        // set its need_reschedule flag.
+
+        Cyg_Thread *cur = current_thread[cpu];
+
+        if( (!need_reschedule[cpu]) &&
+            (cur->priority > thread->priority)
+          )
+        {
+            need_reschedule[cpu] = true;
+
+            if( cpu != cpu_this )
+            {
+                // All processors other than this one need to be sent
+                // a reschedule interrupt.
+
+                CYG_INSTRUMENT_SMP( RESCHED_SEND, cpu, 0 );
+                CYG_KERNEL_CPU_RESCHEDULE_INTERRUPT( cpu, 0 );
+            }
+
+            // Having notionally rescheduled _thread_ onto the cpu, we
+            // now see if we can reschedule the former current thread of
+            // that CPU onto another.  This cascades displaced threads
+            // onto progressively lower-priority CPUs.
+
+            thread = cur;
+        }
+    }
+
+#endif
+}
+
+// -------------------------------------------------------------------------
+// Set up initial idle thread
+// Makes the given thread the current thread of the given CPU and inserts
+// it into the run queues via resume().
+
+void Cyg_Scheduler_Implementation::set_idle_thread( Cyg_Thread *thread, HAL_SMP_CPU_TYPE cpu )
+{
+    // Make the thread the current thread for this CPU.
+
+    current_thread[cpu] = thread;
+
+    // This will insert the thread in the run queues and make it
+    // available to execute.
+    thread->resume();
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+    thread->cpu = cpu;
+
+    // In SMP, we need to take this thread out of the pending array
+    // and map.  (resume() above counted it as pending because its cpu
+    // field was not yet set.)
+
+    cyg_priority pri = thread->priority;
+    if( --pending[pri] == 0 )
+        pending_map &= ~(1<<pri);
+#endif
+
+}
+
+// -------------------------------------------------------------------------
+// register thread with scheduler
+// Deliberate no-op for this scheduler; present to satisfy the common
+// scheduler interface.
+
+void
+Cyg_Scheduler_Implementation::register_thread(Cyg_Thread *thread)
+{
+    CYG_REPORT_FUNCTION();
+    CYG_REPORT_FUNCARG1("thread=%08x", thread);
+    // No registration necessary in this scheduler
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+
+// deregister thread
+// Deliberate no-op, mirroring register_thread() above.
+void
+Cyg_Scheduler_Implementation::deregister_thread(Cyg_Thread *thread)
+{
+    CYG_REPORT_FUNCTION();
+    CYG_REPORT_FUNCARG1("thread=%08x", thread);
+    // No registration necessary in this scheduler
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Test the given priority for uniqueness
+// This scheduler allows many threads at the same priority, so the answer
+// is unconditionally true ("this priority may be assigned").
+
+cyg_bool
+Cyg_Scheduler_Implementation::unique( cyg_priority priority)
+{
+    CYG_REPORT_FUNCTYPE("returning %d");
+    CYG_REPORT_FUNCARG1("priority=%d", priority);
+    // Priorities are not unique
+    CYG_REPORT_RETVAL(true);
+    return true;
+}
+
+//==========================================================================
+// Support for timeslicing option
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+
+// -------------------------------------------------------------------------
+// Called once per system tick: decrement the per-CPU timeslice counters
+// and invoke timeslice_cpu() on any CPU whose counter has expired.
+
+void
+Cyg_Scheduler_Implementation::timeslice(void)
+{
+#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
+    CYG_REPORT_FUNCTION();
+#endif
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+    int c;
+    HAL_SMP_CPU_TYPE cpu;
+    HAL_SMP_CPU_TYPE cpu_count = CYG_KERNEL_CPU_COUNT();
+    HAL_SMP_CPU_TYPE cpu_this = CYG_KERNEL_CPU_THIS();
+
+    for( c = 0; c < cpu_count; c++ )
+    {
+        cpu = HAL_SMP_CPU_COUNT2IDX(c);
+
+        // Remote CPUs are timesliced via an interrupt; only the local
+        // CPU can call timeslice_cpu() directly.
+        if( --timeslice_count[cpu] == 0 )
+            if( cpu == cpu_this )
+                timeslice_cpu();
+            else CYG_KERNEL_CPU_TIMESLICE_INTERRUPT( cpu, 0 );
+    }
+
+#else
+
+    if( --timeslice_count[CYG_KERNEL_CPU_THIS()] == 0 )
+        timeslice_cpu();
+
+#endif
+
+#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
+    CYG_REPORT_RETURN();
+#endif
+}
+
+// -------------------------------------------------------------------------
+// Timeslice the current CPU: if the running thread's quantum has expired,
+// rotate its run queue so a same-priority peer can run, request a
+// reschedule, and restart the quantum.
+
+void
+Cyg_Scheduler_Implementation::timeslice_cpu(void)
+{
+#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
+    CYG_REPORT_FUNCTION();
+#endif
+
+    Cyg_Thread *thread = get_current_thread();
+    HAL_SMP_CPU_TYPE cpu_this = CYG_KERNEL_CPU_THIS();
+
+    CYG_ASSERT( queue_map != 0, "Run queue empty");
+    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE_ENABLE
+    if( thread->timeslice_enabled &&
+        timeslice_count[cpu_this] == 0 )
+#else
+    if( timeslice_count[cpu_this] == 0 )
+#endif
+    {
+        CYG_INSTRUMENT_SCHED(TIMESLICE,0,0);
+#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
+        CYG_TRACE0( true, "quantum consumed, time to reschedule" );
+#endif
+
+        CYG_ASSERT( get_sched_lock() > 0 , "Timeslice called with zero sched_lock");
+
+        // Only try to rotate the run queue if the current thread is running.
+        // Otherwise we are going to reschedule anyway.
+        if( thread->get_state() == Cyg_Thread::RUNNING )
+        {
+            Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;
+
+            CYG_INSTRUMENT_MLQ( TIMESLICE, thread, 0);
+
+            CYG_ASSERTCLASS( thread, "Bad current thread");
+            CYG_ASSERTCLASS( sched, "Bad scheduler");
+
+            cyg_priority pri = thread->priority;
+            Cyg_RunQueue *queue = &sched->run_queue[pri];
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+            // In SMP systems we set the head of the queue to point to
+            // the thread immediately after the current
+            // thread. schedule() will then pick that thread, or one
+            // after it to run next.
+
+            queue->to_head( thread->get_next() );
+#else
+            queue->rotate();
+#endif
+
+            // Only a genuine change of head requires a reschedule; a
+            // single-thread queue rotates onto itself.
+            if( queue->get_head() != thread )
+                sched->set_need_reschedule();
+
+            timeslice_count[cpu_this] = CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS;
+        }
+    }
+
+
+    CYG_ASSERT( queue_map & (1<<CYG_THREAD_MIN_PRIORITY), "Idle thread vanished!!!");
+    CYG_ASSERT( !run_queue[CYG_THREAD_MIN_PRIORITY].empty(), "Idle thread vanished!!!");
+#ifdef CYGDBG_KERNEL_TRACE_TIMESLICE
+    CYG_REPORT_RETURN();
+#endif
+}
+
+// -------------------------------------------------------------------------
+// C-callable wrapper so non-C++ code (e.g. HAL interrupt handlers) can
+// invoke the scheduler's timeslice_cpu() on the singleton scheduler.
+
+__externC void cyg_scheduler_timeslice_cpu(void)
+{
+    Cyg_Scheduler::scheduler.timeslice_cpu();
+}
+
+#endif
+
+//==========================================================================
+// Cyg_SchedThread_Implementation class members
+
+// Construct the per-thread scheduler state.  sched_info carries the
+// thread's priority encoded as a CYG_ADDRWORD.
+Cyg_SchedThread_Implementation::Cyg_SchedThread_Implementation
+(
+    CYG_ADDRWORD sched_info
+)
+{
+    CYG_REPORT_FUNCTION();
+    CYG_REPORT_FUNCARG1("sched_info=%08x", sched_info);
+
+    // Set priority to the supplied value.
+    priority = (cyg_priority)sched_info;
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE_ENABLE
+    // If timeslice_enabled exists, set it true by default
+    timeslice_enabled = true;
+#endif
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+    // Not yet running on any CPU.
+    cpu = CYG_KERNEL_CPU_NONE;
+#endif
+
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Yield the processor to another thread
+// Rotates the run queue at this thread's priority so a same-priority
+// peer may run; if nothing changes, the thread keeps the CPU with a
+// fresh timeslice quantum.
+
+void
+Cyg_SchedThread_Implementation::yield(void)
+{
+    CYG_REPORT_FUNCTION();
+
+    // Prevent preemption
+    Cyg_Scheduler::lock();
+
+    Cyg_Thread *thread  = CYG_CLASSFROMBASE(Cyg_Thread,
+                                            Cyg_SchedThread_Implementation,
+                                            this);
+
+    // Only do this if this thread is running. If it is not, there
+    // is no point.
+
+    if( thread->get_state() == Cyg_Thread::RUNNING )
+    {
+        // To yield we simply rotate the appropriate
+        // run queue to the next thread and reschedule.
+
+        CYG_INSTRUMENT_MLQ( YIELD, thread, 0);
+
+        CYG_ASSERTCLASS( thread, "Bad current thread");
+
+        Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;
+
+        CYG_ASSERTCLASS( sched, "Bad scheduler");
+
+        cyg_priority pri    = thread->priority;
+        Cyg_RunQueue *queue = &sched->run_queue[pri];
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+        // In SMP systems we set the head of the queue to point to
+        // the thread immediately after the current
+        // thread. schedule() will then pick that thread, or one
+        // after it to run next.
+
+        queue->to_head( thread->get_next() );
+#else
+        queue->rotate();
+#endif
+
+        // NOTE: the "else" below belongs to this "if" only when
+        // CYGSEM_KERNEL_SCHED_TIMESLICE is defined.
+        if( queue->get_head() != thread )
+            sched->set_need_reschedule();
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+        // Reset the timeslice counter so that this thread gets a full
+        // quantum.
+        else Cyg_Scheduler::reset_timeslice_count();
+#endif
+    }
+
+    // Unlock the scheduler and switch threads
+#ifdef CYGDBG_USE_ASSERTS
+    // This test keeps the assertions in unlock_inner() happy if
+    // need_reschedule was not set above.
+    if( !Cyg_Scheduler::get_need_reschedule() )
+        Cyg_Scheduler::unlock();
+    else
+#endif
+    Cyg_Scheduler::unlock_reschedule();
+
+
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Rotate the run queue at a specified priority.
+// (pri is the decider, not this, so the routine is static)
+// Used e.g. by POSIX sched_yield-style operations that act on a
+// priority level rather than a specific thread.
+
+void
+Cyg_SchedThread_Implementation::rotate_queue( cyg_priority pri )
+{
+    CYG_REPORT_FUNCTION();
+    CYG_REPORT_FUNCARG1("priority=%d", pri);
+
+    // Prevent preemption
+    Cyg_Scheduler::lock();
+
+    Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;
+
+    CYG_ASSERTCLASS( sched, "Bad scheduler");
+
+    Cyg_RunQueue *queue = &sched->run_queue[pri];
+
+    if ( !queue->empty() ) {
+        queue->rotate();
+        sched->set_need_reschedule();
+    }
+
+    // Unlock the scheduler and switch threads
+    Cyg_Scheduler::unlock();
+
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Move this thread to the head of its queue
+// (not necessarily a scheduler queue)
+// If the thread is waiting on an ordinary thread queue, move it to that
+// queue's head; if it is on a run queue (queue pointer NULL but linked
+// into a list), move it to the head of its priority level and force a
+// reschedule.
+
+void
+Cyg_SchedThread_Implementation::to_queue_head( void )
+{
+    CYG_REPORT_FUNCTION();
+
+    // Prevent preemption
+    Cyg_Scheduler::lock();
+
+    Cyg_Thread *thread  = CYG_CLASSFROMBASE(Cyg_Thread,
+                                            Cyg_SchedThread_Implementation,
+                                            this);
+
+    CYG_ASSERTCLASS( thread, "Bad current thread");
+
+    Cyg_ThreadQueue *q = thread->get_current_queue();
+    if( q != NULL )
+        q->to_head( thread );
+    else if( thread->in_list() )
+    {
+        // If the queue pointer is NULL then it is on a run
+        // queue. Move the thread to the head of it's priority list
+        // and force a reschedule.
+
+        Cyg_Scheduler *sched = &Cyg_Scheduler::scheduler;
+        sched->run_queue[thread->priority].to_head( thread );
+        sched->set_need_reschedule( thread );
+    }
+
+    // Unlock the scheduler and switch threads
+    Cyg_Scheduler::unlock();
+
+    CYG_REPORT_RETURN();
+}
+
+//==========================================================================
+// Cyg_ThreadQueue_Implementation class members
+
+// -------------------------------------------------------------------------
+// Add a thread to this (wait) queue.  With sorted queues enabled the
+// thread is inserted in priority order (FIFO among equal priorities);
+// otherwise it is simply appended.  Finally the thread's queue pointer
+// is set to this queue.
+
+void
+Cyg_ThreadQueue_Implementation::enqueue(Cyg_Thread *thread)
+{
+    CYG_REPORT_FUNCTION();
+    CYG_REPORT_FUNCARG1("thread=%08x", thread);
+
+    CYG_INSTRUMENT_MLQ( ENQUEUE, this, thread );
+
+#ifdef CYGIMP_KERNEL_SCHED_SORTED_QUEUES
+
+    // Insert the thread into the queue in priority order.
+
+    Cyg_Thread *qhead = get_head();
+
+    if( qhead == NULL ) add_tail( thread );
+    else if( qhead == qhead->get_next() )
+    {
+        // There is currently only one thread in the queue, join it
+        // and adjust the queue pointer to point to the highest
+        // priority of the two. If they are the same priority,
+        // leave the pointer pointing to the oldest.
+
+        qhead->insert( thread );
+
+        if( thread->priority < qhead->priority )
+            to_head(thread);
+    }
+    else
+    {
+        // There is more than one thread in the queue. First check
+        // whether we are of higher priority than the head and if
+        // so just jump in at the front. Also check whether we are
+        // lower priority than the tail and jump onto the end.
+        // Otherwise we really have to search the queue to find
+        // our place.
+
+        if( thread->priority < qhead->priority )
+        {
+            qhead->insert( thread );
+            to_head(thread);
+        }
+        else if( thread->priority > get_tail()->priority )
+        {
+            // We are lower priority than any thread in the queue,
+            // go in at the end.
+
+            add_tail( thread );
+        }
+        else
+        {
+            // Search the queue. We do this backwards so that we
+            // always add new threads after any that have the same
+            // priority.
+
+            // Because of the previous tests we know that this
+            // search will terminate before we hit the head of the
+            // queue, hence we do not need to check for that
+            // condition.
+
+            Cyg_Thread *qtmp = get_tail();
+
+            // Scan the queue until we find a higher or equal
+            // priority thread.
+
+            while( qtmp->priority > thread->priority )
+                qtmp = qtmp->get_prev();
+
+            // Append ourself after the node pointed to by qtmp.
+
+            qtmp->append( thread );
+        }
+    }
+#else
+    // Just add the thread to the tail of the list
+    add_tail( thread );
+#endif
+
+    thread->queue = CYG_CLASSFROMBASE(Cyg_ThreadQueue,
+                                      Cyg_ThreadQueue_Implementation,
+                                      this);
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Remove and return the thread at the head of this queue, clearing its
+// queue pointer.  Returns NULL if the queue is empty.
+
+Cyg_Thread *
+Cyg_ThreadQueue_Implementation::dequeue(void)
+{
+    CYG_REPORT_FUNCTYPE("returning thread %08x");
+
+    Cyg_Thread *thread = rem_head();
+
+    CYG_INSTRUMENT_MLQ( DEQUEUE, this, thread );
+
+    if( thread != NULL )
+        thread->queue = NULL;
+
+    CYG_REPORT_RETVAL(thread);
+    return thread;
+}
+
+// -------------------------------------------------------------------------
+// Remove the given thread from wherever it sits in this queue and clear
+// its queue pointer.
+
+void
+Cyg_ThreadQueue_Implementation::remove( Cyg_Thread *thread )
+{
+    CYG_REPORT_FUNCTION();
+    CYG_REPORT_FUNCARG1("thread=%08x", thread);
+
+    CYG_INSTRUMENT_MLQ( REMOVE, this, thread );
+
+    thread->queue = NULL;
+
+    // Delegate the actual unlinking to the underlying circular list.
+    Cyg_CList_T<Cyg_Thread>::remove( thread );
+
+    CYG_REPORT_RETURN();
+}
+
+// -------------------------------------------------------------------------
+// Return (without removing) the highest-priority thread in the queue.
+// With sorted queues this is simply the head.
+
+Cyg_Thread *
+Cyg_ThreadQueue_Implementation::highpri(void)
+{
+    CYG_REPORT_FUNCTYPE("returning thread %08x");
+    CYG_REPORT_RETVAL(get_head());
+    return get_head();
+}
+
+// -------------------------------------------------------------------------
+// Helper: point a thread's queue member at the given queue.  Exists so
+// friends of the queue class can update the private pointer.
+
+inline void
+Cyg_ThreadQueue_Implementation::set_thread_queue(Cyg_Thread *thread,
+                                                 Cyg_ThreadQueue *tq )
+
+{
+    thread->queue = tq;
+}
+
+// -------------------------------------------------------------------------
+
+#endif
+
+// -------------------------------------------------------------------------
+// EOF sched/mlqueue.cxx
diff --git a/cesar/ecos/packages/kernel/current/src/sched/sched.cxx b/cesar/ecos/packages/kernel/current/src/sched/sched.cxx
new file mode 100644
index 0000000000..cf8249f941
--- /dev/null
+++ b/cesar/ecos/packages/kernel/current/src/sched/sched.cxx
@@ -0,0 +1,741 @@
+//==========================================================================
+//
+// sched/sched.cxx
+//
+// Scheduler class implementations
+//
+//==========================================================================
+//####ECOSGPLCOPYRIGHTBEGIN####
+// -------------------------------------------
+// This file is part of eCos, the Embedded Configurable Operating System.
+// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
+//
+// eCos is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 2 or (at your option) any later version.
+//
+// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with eCos; if not, write to the Free Software Foundation, Inc.,
+// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+//
+// As a special exception, if other files instantiate templates or use macros
+// or inline functions from this file, or you compile this file and link it
+// with other works to produce a work based on this file, this file does not
+// by itself cause the resulting work to be covered by the GNU General Public
+// License. However the source code for this file must still be made available
+// in accordance with section (3) of the GNU General Public License.
+//
+// This exception does not invalidate any other reasons why a work based on
+// this file might be covered by the GNU General Public License.
+//
+// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
+// at http://sources.redhat.com/ecos/ecos-license/
+// -------------------------------------------
+//####ECOSGPLCOPYRIGHTEND####
+//==========================================================================
+//#####DESCRIPTIONBEGIN####
+//
+// Author(s): nickg
+// Contributors: nickg
+// Date: 1997-09-15
+// Purpose: Scheduler class implementation
+// Description: This file contains the definitions of the scheduler class
+// member functions that are common to all scheduler
+// implementations.
+//
+//####DESCRIPTIONEND####
+//
+//==========================================================================
+
+#include <pkgconf/kernel.h>
+
+#include <cyg/kernel/ktypes.h> // base kernel types
+#include <cyg/infra/cyg_trac.h> // tracing macros
+#include <cyg/infra/cyg_ass.h> // assertion macros
+#include <cyg/kernel/instrmnt.h> // instrumentation
+
+#include <cyg/kernel/sched.hxx> // our header
+
+#include <cyg/kernel/thread.hxx> // thread classes
+#include <cyg/kernel/intr.hxx> // Interrupt interface
+
+#include <cyg/hal/hal_arch.h> // Architecture specific definitions
+
+#include <cyg/kernel/thread.inl> // thread inlines
+#include <cyg/kernel/sched.inl> // scheduler inlines
+
+//-------------------------------------------------------------------------
+// Some local tracing control - a default.
+#ifdef CYGDBG_USE_TRACING
+# if !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_SIMPLE ) && \
+ !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_FANCY )
+ // ie. not a tracing implementation that takes a long time to output
+
+# ifndef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
+# define CYGDBG_KERNEL_TRACE_UNLOCK_INNER
+# endif // control not already defined
+
+# endif // trace implementation not ..._SIMPLE && not ..._FANCY
+#endif // CYGDBG_USE_TRACING
+
+// -------------------------------------------------------------------------
+// Static Cyg_Scheduler class members
+
+// We start with sched_lock at 1 so that any kernel code we
+// call during initialization will not try to reschedule.
+
+CYGIMP_KERNEL_SCHED_LOCK_DEFINITIONS;
+
+// Per-CPU pointer to the thread currently executing on that CPU.
+Cyg_Thread *volatile Cyg_Scheduler_Base::current_thread[CYGNUM_KERNEL_CPU_MAX];
+
+// Per-CPU flag: a reschedule is required next time the lock is released.
+volatile cyg_bool Cyg_Scheduler_Base::need_reschedule[CYGNUM_KERNEL_CPU_MAX];
+
+// The singleton scheduler instance, constructed at SCHEDULER init priority.
+Cyg_Scheduler Cyg_Scheduler::scheduler CYG_INIT_PRIORITY( SCHEDULER );
+
+// Per-CPU count of context switches (instrumentation/statistics).
+volatile cyg_ucount32 Cyg_Scheduler_Base::thread_switches[CYGNUM_KERNEL_CPU_MAX];
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+// Raw storage for the per-CPU inter-CPU interrupt objects; constructed
+// in start_cpu() via the placement operator new below.
+CYG_BYTE cyg_sched_cpu_interrupt[CYGNUM_KERNEL_CPU_MAX][sizeof(Cyg_Interrupt)]
+                                CYGBLD_ANNOTATE_VARIABLE_SCHED;
+
+__externC cyg_ISR cyg_hal_cpu_message_isr;
+__externC cyg_DSR cyg_hal_cpu_message_dsr;
+
+// File-local placement new used to construct Cyg_Interrupt objects in
+// the static buffer above (size is deliberately unused).
+inline void *operator new(size_t size, void *ptr) { return ptr; };
+
+#endif
+
+// -------------------------------------------------------------------------
+// Scheduler unlock function.
+
+// This is only called when there is the potential for real work to be
+// done. Other cases are handled in Cyg_Scheduler::unlock() which is
+// an inline; _or_ this function may have been called from
+// Cyg_Scheduler::reschedule(), or Cyg_Scheduler::unlock_reschedule. The
+// new_lock argument contains the value that the scheduler lock should
+// have after this function has completed. If it is zero then the lock is
+// being released and some extra work (running ASRs, checking for DSRs) is
+// done before returning. If it is non-zero then it must equal the
+// current value of the lock, and is used to indicate that we want to
+// reacquire the scheduler lock before returning. This latter option
+// only makes any sense if the current thread is no longer runnable,
+// e.g. sleeping, otherwise this function will do nothing.
+// This approach of passing in the lock value at the end effectively
+// makes the scheduler lock a form of per-thread variable. Each call
+// to unlock_inner() carries with it the value the scheduler should
+// have when it reschedules this thread back, and leaves this function.
+// When it is non-zero, and the thread is rescheduled, no ASRS are run,
+// or DSRs processed. By doing this, it makes it possible for threads
+// that want to go to sleep to wake up with the scheduler lock in the
+// same state it was in before.
+
+void Cyg_Scheduler::unlock_inner( cyg_ucount32 new_lock )
+{
+#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
+    CYG_REPORT_FUNCTION();
+#endif
+
+    do {
+
+        CYG_PRECONDITION( new_lock==0 ? get_sched_lock() == 1 :
+                          ((get_sched_lock() == new_lock) || (get_sched_lock() == new_lock+1)),
+                          "sched_lock not at expected value" );
+
+#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS
+
+        // Call any pending DSRs. Do this here to ensure that any
+        // threads that get awakened are properly scheduled.
+
+        if( new_lock == 0 && Cyg_Interrupt::DSRs_pending() )
+            Cyg_Interrupt::call_pending_DSRs();
+#endif
+
+        Cyg_Thread *current = get_current_thread();
+
+        CYG_ASSERTCLASS( current, "Bad current thread" );
+
+#ifdef CYGFUN_KERNEL_ALL_THREADS_STACK_CHECKING
+        // should have CYGVAR_KERNEL_THREADS_LIST
+        current = Cyg_Thread::get_list_head();
+        while ( current ) {
+            current->check_stack();
+            current = current->get_list_next();
+        }
+        current = get_current_thread();
+#endif
+
+#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
+        current->check_stack();
+#endif
+
+        // If the current thread is going to sleep, or someone
+        // wants a reschedule, choose another thread to run
+
+        if( current->state != Cyg_Thread::RUNNING || get_need_reschedule() ) {
+
+            CYG_INSTRUMENT_SCHED(RESCHEDULE,0,0);
+
+            // Get the next thread to run from scheduler
+            Cyg_Thread *next = scheduler.schedule();
+
+            CYG_CHECK_DATA_PTR( next, "Invalid next thread pointer");
+            CYG_ASSERTCLASS( next, "Bad next thread" );
+
+            if( current != next )
+            {
+
+                CYG_INSTRUMENT_THREAD(SWITCH,current,next);
+
+                // Count this thread switch
+                thread_switches[CYG_KERNEL_CPU_THIS()]++;
+
+#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
+                next->check_stack(); // before running it
+#endif
+
+                // Switch contexts
+                HAL_THREAD_SWITCH_CONTEXT( &current->stack_ptr,
+                                           &next->stack_ptr );
+
+                // Worry here about possible compiler
+                // optimizations across the above call that may try to
+                // propagate common subexpressions. We would end up
+                // with the expression from one thread in its
+                // successor. This is only a worry if we do not save
+                // and restore the complete register set. We need a
+                // way of marking functions that return into a
+                // different context. A temporary fix would be to
+                // disable CSE (-fdisable-cse) in the compiler.
+
+                // We return here only when the current thread is
+                // rescheduled. There is a bit of housekeeping to do
+                // here before we are allowed to go on our way.
+
+                CYG_CHECK_DATA_PTR( current, "Invalid current thread pointer");
+                CYG_ASSERTCLASS( current, "Bad current thread" );
+
+                current_thread[CYG_KERNEL_CPU_THIS()] = current; // restore current thread pointer
+            }
+
+#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
+            // Reset the timeslice counter so that this thread gets a full
+            // quantum.
+            reset_timeslice_count();
+#endif
+
+            clear_need_reschedule();    // finished rescheduling
+        }
+
+        if( new_lock == 0 )
+        {
+
+#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
+
+            // Check whether the ASR is pending and not inhibited. If
+            // we can call it, then transfer this info to a local
+            // variable (call_asr) and clear the pending flag. Note
+            // that we only do this if the scheduler lock is about to
+            // be zeroed. In any other circumstance we are not
+            // unlocking.
+
+            cyg_bool call_asr = false;
+
+            if( (current->asr_inhibit == 0) && current->asr_pending )
+            {
+                call_asr = true;
+                current->asr_pending = false;
+            }
+#endif
+
+            HAL_REORDER_BARRIER(); // Make sure everything above has happened
+                                   // by this point
+            zero_sched_lock();     // Clear the lock
+            HAL_REORDER_BARRIER();
+
+#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS
+
+            // Now check whether any DSRs got posted during the thread
+            // switch and if so, go around again. Making this test after
+            // the lock has been zeroed avoids a race condition in which
+            // a DSR could have been posted during a reschedule, but would
+            // not be run until the _next_ time we release the sched lock.
+
+            if( Cyg_Interrupt::DSRs_pending() ) {
+                inc_sched_lock();   // reclaim the lock
+                continue;           // go back to head of loop
+            }
+
+#endif
+            // Otherwise the lock is zero, we can return.
+
+//            CYG_POSTCONDITION( get_sched_lock() == 0, "sched_lock not zero" );
+
+#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
+            // If the test within the sched_lock indicating that the ASR
+            // be called was true, call it here. Calling the ASR must be
+            // the very last thing we do here, since it must run as close
+            // to "user" state as possible.
+
+            if( call_asr ) current->asr(current->asr_data);
+#endif
+
+        }
+        else
+        {
+            // If new_lock is non-zero then we restore the sched_lock to
+            // the value given.
+
+            HAL_REORDER_BARRIER();
+
+            set_sched_lock(new_lock);
+
+            HAL_REORDER_BARRIER();
+        }
+
+#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
+        CYG_REPORT_RETURN();
+#endif
+        return;
+
+    } while( 1 );
+
+    CYG_FAIL( "Should not be executed" );
+}
+
+// -------------------------------------------------------------------------
+// Start the scheduler. This is called after the initial threads have been
+// created to start scheduling. It gets any other CPUs running, and then
+// enters the scheduler.
+
+void Cyg_Scheduler::start()
+{
+    CYG_REPORT_FUNCTION();
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+    HAL_SMP_CPU_TYPE cpu;
+
+    for( cpu = 0; cpu < CYG_KERNEL_CPU_START_COUNT(); cpu++ )
+    {
+        // Don't start this CPU, it is running already!
+        if( cpu == CYG_KERNEL_CPU_THIS() )
+            continue;
+
+        CYG_KERNEL_CPU_START( cpu );
+    }
+
+#endif
+
+    // Enter the scheduler on the boot CPU; does not return.
+    start_cpu();
+}
+
+// -------------------------------------------------------------------------
+// Start scheduling on this CPU. This is called on each CPU in the system
+// when it is started.  Sets up the inter-CPU interrupt (SMP), picks the
+// first thread and loads its context; it does not return.
+
+void Cyg_Scheduler::start_cpu()
+{
+    CYG_REPORT_FUNCTION();
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+    // Set up the inter-CPU interrupt for this CPU.  The object is
+    // constructed with placement new into the static buffer declared
+    // above, so no heap is required.
+
+    Cyg_Interrupt * intr = new( (void *)&cyg_sched_cpu_interrupt[HAL_SMP_CPU_THIS()] )
+        Cyg_Interrupt( CYGNUM_HAL_SMP_CPU_INTERRUPT_VECTOR( HAL_SMP_CPU_THIS() ),
+                       0,
+                       0,
+                       cyg_hal_cpu_message_isr,
+                       cyg_hal_cpu_message_dsr
+                     );
+
+    intr->set_cpu( intr->get_vector(), HAL_SMP_CPU_THIS() );
+
+    intr->attach();
+
+    intr->unmask_interrupt( intr->get_vector() );
+
+#endif
+
+    // Get the first thread to run from scheduler
+    register Cyg_Thread *next = scheduler.schedule();
+
+    CYG_ASSERTCLASS( next, "Bad initial thread" );
+
+    clear_need_reschedule();            // finished rescheduling
+    set_current_thread(next);           // restore current thread pointer
+
+#ifdef CYGVAR_KERNEL_COUNTERS_CLOCK
+    // Reference the real time clock. This ensures that at least one
+    // reference to the kernel_clock.o object exists, without which
+    // the object will not be included while linking.
+    CYG_REFERENCE_OBJECT( Cyg_Clock::real_time_clock );
+#endif
+
+    // Load the first thread. This will also enable interrupts since
+    // the initial state of all threads is to have interrupts enabled.
+
+    HAL_THREAD_LOAD_CONTEXT( &next->stack_ptr );
+
+}
+
+// -------------------------------------------------------------------------
+// SMP support functions
+
+#ifdef CYGPKG_KERNEL_SMP_SUPPORT
+
+// This is called on each secondary CPU on its interrupt stack after
+// the initial CPU has initialized the world.
+// Takes the scheduler lock and enters start_cpu(), which does not return.
+
+externC void cyg_kernel_smp_startup()
+{
+    CYG_INSTRUMENT_SMP( CPU_START, CYG_KERNEL_CPU_THIS(), 0 );
+    Cyg_Scheduler::lock();
+    Cyg_Scheduler::start_cpu();
+}
+
+// This is called from the DSR of the inter-CPU interrupt to cause a
+// reschedule when the scheduler lock is zeroed.
+
+__externC void cyg_scheduler_set_need_reschedule()
+{
+    CYG_INSTRUMENT_SMP( RESCHED_RECV, 0, 0 );
+    // Flag only this CPU; the actual switch happens in unlock_inner().
+    Cyg_Scheduler::need_reschedule[HAL_SMP_CPU_THIS()] = true;
+}
+
+#endif
+
+// -------------------------------------------------------------------------
+// Consistency checker
+
+#ifdef CYGDBG_USE_ASSERTS
+
+// Consistency check used by the assertion machinery; the amount of
+// checking done scales with the requested zeal.
+cyg_bool Cyg_Scheduler::check_this( cyg_assert_class_zeal zeal) const
+{
+    CYG_REPORT_FUNCTION();
+
+    // check that we have a non-NULL pointer first
+    // NOTE(review): calling a member on a null `this` is undefined
+    // behaviour in standard C++; this is a deliberate legacy defensive
+    // check in eCos debug builds.
+    if( this == NULL ) return false;
+
+    switch( zeal )
+    {
+    case cyg_system_test:
+    case cyg_extreme:
+    case cyg_thorough:
+        if( !get_current_thread()->check_this(zeal) ) return false;
+        // fall through: lighter zeal levels perform no extra checks
+    case cyg_quick:
+    case cyg_trivial:
+    case cyg_none:
+    default:
+        break;
+    };
+
+    return true;
+}
+
+#endif
+
+//==========================================================================
+// SchedThread members
+
+// -------------------------------------------------------------------------
+// Static data members
+
+#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
+
+# ifdef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
+// Single global ASR shared by all threads; defaults to the no-op handler.
+Cyg_ASR *Cyg_SchedThread::asr = &Cyg_SchedThread::asr_default;
+# endif
+
+# ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
+// Single global ASR data word shared by all threads.
+CYG_ADDRWORD Cyg_SchedThread::asr_data = 0;
+# endif
+
+#endif // CYGSEM_KERNEL_SCHED_ASR_SUPPORT
+
+// -------------------------------------------------------------------------
+// Constructor
+
+// Construct the scheduler-independent per-thread state.  sched_info is
+// forwarded to the scheduler implementation (it encodes the priority);
+// the `thread` parameter is unused in this body.
+Cyg_SchedThread::Cyg_SchedThread(Cyg_Thread *thread, CYG_ADDRWORD sched_info)
+: Cyg_SchedThread_Implementation(sched_info)
+{
+    CYG_REPORT_FUNCTION();
+
+    queue = NULL;
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL
+
+    mutex_count = 0;
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
+
+    priority_inherited = false;
+
+#endif
+#endif
+
+#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
+
+    asr_inhibit = 0;
+    asr_pending = false;
+
+#ifndef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
+    asr = asr_default;
+#endif
+#ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
+    // FIX: this statement previously lacked its terminating semicolon,
+    // which was a compile error whenever CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
+    // was enabled.
+    // NOTE(review): the #ifdef polarity looks suspicious -- the per-thread
+    // `asr` member above is initialized when it is NOT global, so this
+    // condition probably ought to be #ifndef (global asr_data is already
+    // statically initialized to 0).  Confirm against the configuration
+    // options before changing behaviour.
+    asr_data = NULL;
+#endif
+
+#endif
+}
+
+// -------------------------------------------------------------------------
+// ASR support functions
+
+#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
+
+// -------------------------------------------------------------------------
+// Set ASR
+// Install a new ASR, returning the old one.
+// A NULL new_asr leaves the handler unchanged and replaces only the data
+// word.  The old handler/data are returned through the optional out
+// parameters.  Runs with the scheduler locked to avoid races with ASR
+// delivery.
+
+void Cyg_SchedThread::set_asr( Cyg_ASR  *new_asr, CYG_ADDRWORD  new_data,
+                               Cyg_ASR **old_asr, CYG_ADDRWORD *old_data)
+{
+    CYG_REPORT_FUNCTION();
+
+    // Do this with the scheduler locked...
+    Cyg_Scheduler::lock();
+
+    if( old_asr != NULL ) *old_asr = asr;
+    if( old_data != NULL ) *old_data = asr_data;
+
+    // If new_asr is NULL, do not change the ASR,
+    // but only change the data.
+    if( new_asr != NULL ) asr = new_asr;
+    asr_data = new_data;
+
+    Cyg_Scheduler::unlock();
+}
+
+// -------------------------------------------------------------------------
+// Clear ASR
+// Restore the default (no-op) handler and zero the data word, under the
+// scheduler lock.
+
+void Cyg_SchedThread::clear_asr()
+{
+    CYG_REPORT_FUNCTION();
+
+    // Do this with the scheduler locked...
+    Cyg_Scheduler::lock();
+
+    // Reset ASR to default.
+    asr = asr_default;
+    asr_data = 0;
+
+    Cyg_Scheduler::unlock();
+}
+
+// -------------------------------------------------------------------------
+// Default ASR function.
+// having this avoids our having to worry about ever seeing a NULL
+// pointer as the ASR function.
+
+void Cyg_SchedThread::asr_default(CYG_ADDRWORD data)
+{
+    CYG_REPORT_FUNCTION();
+
+    // Deliberately does nothing. The self-assignment merely silences
+    // "unused parameter" warnings on older compilers.
+    data = data;
+}
+
+#endif
+
+// -------------------------------------------------------------------------
+// Generic priority protocol support
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL
+
+// Boost this thread to priority 'pri' if that is better than its current
+// priority, remembering the pre-boost value in original_priority so it
+// can be restored by clear_inherited_priority(). Must be called with the
+// scheduler locked and with mutex_count > 0.
+void Cyg_SchedThread::set_inherited_priority( cyg_priority pri, Cyg_Thread *thread )
+{
+    CYG_REPORT_FUNCTION();
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
+
+    // This is the common code for priority inheritance and ceiling
+    // protocols. This implementation provides a simplified version of
+    // the protocol. The 'thread' (donor) argument is unused in this
+    // simple variant.
+
+    // Recover the full thread object from this scheduler-level base.
+    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
+                                         Cyg_SchedThread,
+                                         this);
+
+    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
+
+    // Compare with *current* priority in case thread has already
+    // inherited - for relay case below. (The comparison implies that
+    // numerically smaller values are treated as better priorities --
+    // confirm against the scheduler's priority convention.)
+    if( pri < priority )
+    {
+        cyg_priority mypri = priority;
+        cyg_bool already_inherited = priority_inherited;
+
+        // If this is first inheritance, copy the old pri
+        // and set inherited flag. We clear it before setting the
+        // pri since set_priority() is inheritance aware.
+        // This is called with the sched locked, so no race conditions.
+
+        priority_inherited = false;     // so that set_priority() does the
+                                        // right thing for a boosted thread
+
+        self->set_priority( pri );
+
+        // Only the first boost records the true original priority;
+        // later relays must not overwrite it.
+        if( !already_inherited )
+            original_priority = mypri;
+
+        priority_inherited = true;      // regardless, because it is now
+
+    }
+
+#endif
+}
+
+void Cyg_SchedThread::relay_inherited_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
+{
+    CYG_REPORT_FUNCTION();
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
+
+    // Simple priority inheritance: when other threads are still queued
+    // on the mutex, the new owner (this thread) inherits the previous
+    // owner's current priority, which is already a maximum over those
+    // waiters -- effectively a discovered priority ceiling. When nobody
+    // is waiting there is nothing to inherit, and skipping the boost
+    // avoids unnecessary priority skew.
+
+    if( pqueue->empty() )
+        return;
+
+    set_inherited_priority( ex_owner->get_current_priority(), ex_owner );
+
+#endif
+}
+
+// Drop any inherited priority boost once this thread no longer holds
+// any mutexes, restoring the priority recorded at the first boost.
+void Cyg_SchedThread::clear_inherited_priority()
+{
+    CYG_REPORT_FUNCTION();
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
+
+    // A simple implementation of priority inheritance/ceiling
+    // protocols. The simplification in this algorithm is that we do
+    // not reduce our priority until we have freed all mutexes
+    // claimed. Hence we can continue to run at an artificially high
+    // priority even when we should not. However, since nested
+    // mutexes are rare, the thread we have inherited from is likely
+    // to be locking the same mutexes we are, and mutex claim periods
+    // should be very short, the performance difference between this
+    // and a more complex algorithm should be negligible. The most
+    // important advantage of this algorithm is that it is fast and
+    // deterministic.
+
+    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
+                                         Cyg_SchedThread,
+                                         this);
+
+    // BUGFIX: the message previously read "Non-positive mutex count",
+    // contradicting the >= 0 condition -- zero is legal here; only a
+    // negative count indicates corruption.
+    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");
+
+    // Restore only when the last mutex has gone and we are boosted.
+    if( mutex_count == 0 && priority_inherited )
+    {
+        priority_inherited = false;
+
+        // Only make an effort if the priority must change
+        if( priority < original_priority )
+            self->set_priority( original_priority );
+
+    }
+
+#endif
+}
+
+#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL
+
+// -------------------------------------------------------------------------
+// Priority inheritance support.
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT
+
+// -------------------------------------------------------------------------
+// Inherit the priority of the provided thread if it
+// has a higher priority than ours.
+
+void Cyg_SchedThread::inherit_priority( Cyg_Thread *thread)
+{
+    CYG_REPORT_FUNCTION();
+
+    // Recover the full thread object from this scheduler-level base.
+    Cyg_Thread *owner = CYG_CLASSFROMBASE(Cyg_Thread,
+                                          Cyg_SchedThread,
+                                          this);
+
+    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
+    CYG_ASSERT( owner != thread, "Trying to inherit from self!");
+
+    // Delegate to the generic protocol support, donating the other
+    // thread's current (possibly itself boosted) priority.
+    owner->set_inherited_priority( thread->get_current_priority(), thread );
+}
+
+// -------------------------------------------------------------------------
+// Inherit the priority of the ex-owner thread or from the queue if it
+// has a higher priority than ours.
+
+// Inheritance-protocol entry point: hand any inherited boost from the
+// ex-owner of a mutex to this (new owner) thread, based on whether any
+// threads remain queued. Thin wrapper over the generic support above.
+void Cyg_SchedThread::relay_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
+{
+    CYG_REPORT_FUNCTION();
+
+    relay_inherited_priority( ex_owner, pqueue );
+}
+
+// -------------------------------------------------------------------------
+// Lose a priority inheritance
+
+// Lose a priority inheritance: restore the original priority once no
+// mutexes are held. Zero is a legal count here, since this runs after
+// a mutex has been released.
+void Cyg_SchedThread::disinherit_priority()
+{
+    CYG_REPORT_FUNCTION();
+
+    // BUGFIX: message used to say "Non-positive mutex count", which
+    // contradicts the >= 0 condition; only a negative count is an error.
+    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");
+
+    clear_inherited_priority();
+}
+
+#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT
+
+// -------------------------------------------------------------------------
+// Priority ceiling support
+
+#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING
+
+// Apply a priority ceiling: boost this thread to 'pri' via the generic
+// inherited-priority support. Requires at least one mutex to be held.
+void Cyg_SchedThread::set_priority_ceiling( cyg_priority pri )
+{
+    CYG_REPORT_FUNCTION();
+
+    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
+
+    // NOTE(review): only one argument is passed although
+    // set_inherited_priority() is defined above with two parameters;
+    // presumably the class declaration supplies a default for the
+    // 'thread' argument -- confirm against the header.
+    set_inherited_priority( pri );
+
+}
+
+// Remove a priority ceiling, restoring the original priority once no
+// mutexes remain held. Zero is a legal count here.
+void Cyg_SchedThread::clear_priority_ceiling( )
+{
+    CYG_REPORT_FUNCTION();
+
+    // BUGFIX: assertion text said "Non-positive mutex count" although
+    // the condition permits zero; only a negative count is an error.
+    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");
+
+    clear_inherited_priority();
+}
+
+#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING
+
+// -------------------------------------------------------------------------
+// EOF sched/sched.cxx