Preliminary patch for bug #60194


This patch should address the issue described in Bugzilla bug
#60194 ("mempool code locks scheduler, not mutex").  It replaces the
scheduler lock calls in the memory pool template with calls to a per-pool
mutex.  I have code that makes heavy use of the heap allocator; running it
under load with this patch applied, everything appears to work correctly.
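
The pattern is the same in every entry point: operations on the underlying
pool are now bracketed by the pool's own mutex instead of the global
scheduler lock.  As a condensed illustration (based on the patched
get_freemem() below, with the CYG_ASSERTCLASS check omitted):

    template <class T>
    inline cyg_int32
    Cyg_Mempoolt<T>::get_freemem()
    {
        mutex.lock();                        // was: Cyg_Scheduler::lock();

        cyg_int32 ret = pool.get_freemem();  // delegate to the underlying pool

        mutex.unlock();                      // was: Cyg_Scheduler::unlock();
        return ret;
    }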

Diffs against mempoolt.hxx and mempoolt.inl follow:

--- mempoolt.hxx.orig	2003-08-08 17:17:35.000000000 -0400
+++ mempoolt.hxx	2003-08-08 01:34:27.000000000 -0400
@@ -61,6 +61,7 @@
 #include <cyg/kernel/ktypes.h>
 #include <cyg/infra/cyg_ass.h>         // assertion macros
 #include <cyg/kernel/thread.hxx>
+#include <cyg/kernel/mutex.hxx>

 template <class T>
 class Cyg_Mempoolt
@@ -68,6 +69,7 @@
 private:
     T pool;                             // underlying memory manager
     Cyg_ThreadQueue queue;              // queue of waiting threads
+    Cyg_Mutex       mutex;              // mutex for synchronization

 public:


--- mempoolt.inl.orig	2003-08-08 17:17:45.000000000 -0400
+++ mempoolt.inl	2003-08-08 01:34:44.000000000 -0400
@@ -59,7 +59,7 @@
 //==========================================================================

 #include <cyg/kernel/thread.inl>  // implementation eg. Cyg_Thread::self();
-#include <cyg/kernel/sched.inl>   // implementation eg. Cyg_Scheduler::lock();
+#include <cyg/kernel/mutex.inl>   // implementation eg. Cyg_Mutex

 // -------------------------------------------------------------------------
 // Constructor; we _require_ these arguments and just pass them through to
@@ -77,8 +77,8 @@
 template <class T>
 Cyg_Mempoolt<T>::~Cyg_Mempoolt()  // destructor
 {
-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Get the lock
+    mutex.lock();

     while ( ! queue.empty() ) {
         Cyg_Thread *thread = queue.dequeue();
@@ -86,8 +86,8 @@
         thread->wake();
     }

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the pool
+    mutex.unlock();
 }

 // -------------------------------------------------------------------------
@@ -100,8 +100,8 @@

     Cyg_Thread *self = Cyg_Thread::self();

-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the heap
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     // Loop while we got no memory, sleeping each time around the
@@ -115,12 +115,12 @@
         self->sleep();
         queue.enqueue( self );

-        CYG_ASSERT( 1 == Cyg_Scheduler::get_sched_lock(),
-                    "Called with non-zero scheduler lock");
+        CYG_ASSERT( NULL != mutex.get_owner(),
+                    "Called without pool mutex being obtained.");

-        // Unlock scheduler and allow other threads to run
-        Cyg_Scheduler::unlock();
-        Cyg_Scheduler::lock();
+        // Unlock the mutex and allow other threads to run
+        mutex.unlock();
+        mutex.lock();

         CYG_ASSERTCLASS( this, "Bad this pointer");

@@ -144,8 +144,8 @@
     if ( ! result )
         ret = NULL;

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the pool and maybe switch threads
+    mutex.unlock();
     CYG_REPORT_RETVAL( ret );
     return ret;
 }
@@ -161,8 +161,8 @@

     Cyg_Thread *self = Cyg_Thread::self();

-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool.
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     // Loop while we got no memory, sleeping each time around the
@@ -186,12 +186,12 @@
         self->sleep();
         queue.enqueue( self );

-        CYG_ASSERT( 1 == Cyg_Scheduler::get_sched_lock(),
-                    "Called with non-zero scheduler lock");
+        CYG_ASSERT( NULL != mutex.get_owner(),
+                    "Called without pool mutex obtained");

-        // Unlock scheduler and allow other threads to run
-        Cyg_Scheduler::unlock();
-        Cyg_Scheduler::lock();
+        // Unlock the mutex and allow other threads to run
+        mutex.unlock();
+        mutex.lock();

         CYG_ASSERTCLASS( this, "Bad this pointer");
         switch( self->get_wake_reason() )
@@ -222,8 +222,8 @@
     // clear the timer; if it actually fired, no worries.
     self->clear_timer();

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the pool and maybe switch threads
+    mutex.unlock();
     CYG_REPORT_RETVAL( ret );
     return ret;
 }
@@ -237,16 +237,16 @@
 {
     CYG_REPORT_FUNCTION();

-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     cyg_uint8 *ret = pool.alloc( size );

     CYG_ASSERTCLASS( this, "Bad this pointer");

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the mutex and maybe switch threads
+    mutex.unlock();
     CYG_REPORT_RETVAL( ret );
     return ret;
 }
@@ -258,8 +258,8 @@
 cyg_bool
 Cyg_Mempoolt<T>::free( cyg_uint8 *p, cyg_int32 size )
 {
-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     cyg_int32 ret = pool.free( p, size );
@@ -279,8 +279,8 @@
         // we cannot yield here; if a higher prio thread can't satisfy its
         // request it would re-queue and we would loop forever
     }
-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the mutex and maybe switch threads
+    mutex.unlock();
     return ret;
 }

@@ -300,14 +300,14 @@
 inline cyg_int32
 Cyg_Mempoolt<T>::get_totalmem()
 {
-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     cyg_int32 ret = pool.get_totalmem();

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the mutex and maybe switch threads
+    mutex.unlock();
     return ret;
 }

@@ -315,14 +315,14 @@
 inline cyg_int32
 Cyg_Mempoolt<T>::get_freemem()
 {
-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     cyg_int32 ret = pool.get_freemem();

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the mutex and maybe switch threads
+    mutex.unlock();
     return ret;
 }

@@ -334,14 +334,14 @@
 Cyg_Mempoolt<T>::get_arena(
     cyg_uint8 * &base, cyg_int32 &size, CYG_ADDRWORD &arg_thru )
 {
-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     pool.get_arena( base, size, arg_thru );

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the mutex and maybe switch threads
+    mutex.unlock();
 }

 // -------------------------------------------------------------------------
@@ -353,14 +353,14 @@
 {
     cyg_int32 ret;

-    // Prevent preemption
-    Cyg_Scheduler::lock();
+    // Lock the pool
+    mutex.lock();
     CYG_ASSERTCLASS( this, "Bad this pointer");

     ret = pool.get_allocation_size( ptr );

-    // Unlock the scheduler and maybe switch threads
-    Cyg_Scheduler::unlock();
+    // Unlock the mutex and maybe switch threads
+    mutex.unlock();

     return ret;
 }
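
For anyone skimming the hunks: the blocking alloc() paths keep the existing
sleep/wake protocol unchanged; the patch only swaps the lock that brackets
the wait loop and changes the sanity assertion from checking the scheduler
lock count to checking mutex ownership.  Condensed, the patched wait loop
looks roughly like this (the loop condition and wake-reason handling are
elided; see the hunks above for the exact context):

    mutex.lock();                     // was: Cyg_Scheduler::lock();

    // while pool.alloc( size ) returns nothing:
        self->sleep();
        queue.enqueue( self );

        CYG_ASSERT( NULL != mutex.get_owner(),
                    "Called without pool mutex being obtained.");

        mutex.unlock();               // let other threads run (and free memory)
        mutex.lock();                 // reacquire before retrying

    // once the allocation succeeds (or the thread is released):
    mutex.unlock();                   // was: Cyg_Scheduler::unlock();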

