patches/uClibc/0.9.30.2/270-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch
author "Yann E. MORIN" <yann.morin.1998@anciens.enib.fr>
Tue Aug 02 18:26:53 2011 +0200 (2011-08-02)
changeset 2592 4908eb2b6f17
permissions -rw-r--r--
scripts/functions: cvs retrieval first tries the mirror for tarballs

The cvs download helper looks for the local tarballs dir to see if it
can find a pre-downloaded tarball, and if it does not find it, does
the actual fetch to upstream via cvs.

In the process, it does not even try to get a tarball from the local
mirror, which can be useful if the mirror has been pre-populated
manually (or with a previously downloaded tree).

Fake a tarball get with the standard tarball-download helper, but
without specifying any upstream URL, which makes the helper directly
try the LAN mirror.

Of course, if no mirror is specified, no URL wil be available, and
the standard cvs retrieval will kick in.

Signed-off-by: "Yann E. MORIN" <yann.morin.1998@anciens.enib.fr>
From fa476d01f1c1990a92ee49d1f1c557b83805d0e9 Mon Sep 17 00:00:00 2001
From: Freeman Wang <xwang@ubicom.com>
Date: Sat, 19 Dec 2009 13:43:00 -0800
Subject: [PATCH 09/15] malloc: fix race condition and other bugs in the no-mmu malloc

Fixes multiple race conditions on the mmb list. This is done by
making mmb_heap_lock a recursive lock and extending the coverage
of the regular heap_lock to include the mmb heap handling.

Also move the new_mmb allocation up, before the mmb list is
iterated to find the insertion point: if the mmb heap has to be
extended right after the regular heap has just been extended, the
mmb list could otherwise end up corrupted.

Signed-off-by: Freeman Wang <xwang@ubicom.com>
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
---
 libc/stdlib/malloc/free.c   |    6 +++---
 libc/stdlib/malloc/malloc.c |    7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 90e18f4..741248a 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -179,14 +179,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
 	      /* Start searching again from the end of this block.  */
 	      start = mmb_end;
 
+	      /* Release the descriptor block we used.  */
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
               __heap_unlock (heap_lock);
 
-	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 71f9e58..84a6acd 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -48,7 +48,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
@@ -151,19 +151,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
 
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -177,6 +177,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 			    (unsigned)new_mmb,
 			    (unsigned)new_mmb->mem, block_size);
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+	  __heap_unlock (heap_lock);
 	}
     }
 
-- 
1.6.6.1
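
For context (not part of the upstream patch): the key change is turning
__malloc_mmb_heap_lock into a recursive mutex, because with the reordered
free path free_to_heap() can be re-entered while that same lock is already
held, namely when the mmb descriptor being released itself lives in the mmb
heap. The standalone sketch below illustrates that locking pattern with a
plain pthreads recursive mutex; it assumes a GNU-style libc that provides
PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, and the name fake_free_to_heap() is
a hypothetical stand-in, not a uClibc function.

/* Illustrative sketch only.  A default (non-recursive) mutex typically
 * deadlocks when the same thread locks it twice (the behavior is
 * undefined for the default type); a recursive mutex just counts the
 * nesting depth, which is what the initializer change relies on. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmb_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

/* Stand-in for free_to_heap(): callable both from user code and
 * re-entrantly while mmb_lock is already held by the same thread. */
static void fake_free_to_heap(int depth)
{
	pthread_mutex_lock(&mmb_lock);   /* nests safely on a recursive mutex */
	printf("lock held at depth %d\n", depth);

	if (depth == 0)
		fake_free_to_heap(depth + 1); /* models freeing the mmb descriptor */

	pthread_mutex_unlock(&mmb_lock); /* one unlock per lock, at each depth */
}

int main(void)
{
	fake_free_to_heap(0);
	return 0;
}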