patches/uClibc/0.9.30.2/270-malloc-fix-race-condition-and-other-bugs-in-the-no-m.patch
author "Yann E. MORIN" <yann.morin.1998@anciens.enib.fr>
Sun Jul 17 17:54:21 2011 +0200 (2011-07-17)
changeset 2888 dd71df95903a
permissions -rw-r--r--
cc/gcc: pass the companion libs prefix to cc_core

In case of canadian-cross, the companion libraries are not the same for
the core cc (they run on 'build') as they are for the final cc (they run
on 'host').

Prepare for this differentiation (coming later), while retaining the
current behavior (to use the same compblibs).

Signed-off-by: "Yann E. MORIN" <yann.morin.1998@anciens.enib.fr>
From fa476d01f1c1990a92ee49d1f1c557b83805d0e9 Mon Sep 17 00:00:00 2001
From: Freeman Wang <xwang@ubicom.com>
Date: Sat, 19 Dec 2009 13:43:00 -0800
Subject: [PATCH 09/15] malloc: fix race condition and other bugs in the no-mmu malloc

Fixes multiple race conditions on the mmb list. This was done by
making the mmb_heap_lock a recursive lock and extending the regular
heap_lock to cover the mmb heap handling (a standalone recursive-lock
sketch follows the diffstat below).

Also move the new_mmb allocation up, before the mmb list is walked to
find the insertion point. If the mmb heap itself runs out and has to be
extended just after the regular heap was extended, that allocation can
insert into the same list, leaving the previously found insertion point
stale and corrupting the mmb list (see the ordering sketch after the
patch).

Signed-off-by: Freeman Wang <xwang@ubicom.com>
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
---
 libc/stdlib/malloc/free.c   |    6 +++---
 libc/stdlib/malloc/malloc.c |    7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

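For reference, here is a minimal standalone sketch of the recursive-mutex
behaviour the new __malloc_mmb_heap_lock initializer relies on. It is not
uClibc code: it uses the plain pthread API, and inner_bookkeeping() is a
made-up stand-in for the nested mmb-heap work. The point is only that the
same thread may re-acquire a PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP mutex
it already holds, so the re-entrant mmb bookkeeping no longer deadlocks.

/* Minimal sketch, not uClibc internals.  Build with: cc -pthread demo.c */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

/* Same initializer the patch switches __malloc_mmb_heap_lock to. */
static pthread_mutex_t mmb_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

/* Hypothetical helper standing in for the nested mmb-heap work. */
static void inner_bookkeeping(void)
{
    /* With a normal mutex this second lock would deadlock;
       with a recursive mutex it only bumps the owner's count. */
    pthread_mutex_lock(&mmb_lock);
    puts("re-entered mmb_lock while already holding it");
    pthread_mutex_unlock(&mmb_lock);
}

int main(void)
{
    pthread_mutex_lock(&mmb_lock);
    inner_bookkeeping();
    pthread_mutex_unlock(&mmb_lock);
    return 0;
}

Inside uClibc the locking goes through the __heap_lock()/__heap_unlock()
wrappers visible in the diff rather than direct pthread calls, but the
recursive property exercised here is the same.
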
diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 90e18f4..741248a 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -179,14 +179,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
 	      /* Start searching again from the end of this block.  */
 	      start = mmb_end;
 
+	      /* Release the descriptor block we used.  */
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
               __heap_unlock (heap_lock);
 
-	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 71f9e58..84a6acd 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -48,7 +48,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
@@ -151,19 +151,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
 
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -177,6 +177,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 			    (unsigned)new_mmb,
 			    (unsigned)new_mmb->mem, block_size);
 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+	  __heap_unlock (heap_lock);
 	}
     }
 
-- 
1.6.6.1
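
The malloc.c hunk above also reorders things so that new_mmb is allocated
before the insertion point in __malloc_mmapped_blocks is located. The toy
sketch below shows why that ordering matters; it is not uClibc code: struct
mmb, blocks, alloc_descriptor() and record_block() are illustrative names,
and the real hazard is that malloc_from_heap() on the mmb heap may itself
splice a record into the same list, leaving a previously computed prev/next
pair stale.

/* Toy sketch of the safe ordering used by the patched malloc.c. */
#include <stdio.h>
#include <stdlib.h>

struct mmb {                   /* stand-in for struct malloc_mmb */
    void *mem;
    struct mmb *next;
};

static struct mmb *blocks;     /* sorted by ->mem, like __malloc_mmapped_blocks */

/* In uClibc this would be malloc_from_heap() on the mmb heap, which can
   mmap more memory and insert into the list itself; here it is plain malloc. */
static struct mmb *alloc_descriptor(void)
{
    return malloc(sizeof(struct mmb));
}

static void record_block(void *block)
{
    /* 1. Allocate the descriptor first, while no list pointers are held... */
    struct mmb *new_mmb = alloc_descriptor();
    struct mmb *prev = NULL, *cur;

    /* 2. ...then find the insertion point... */
    for (cur = blocks; cur; prev = cur, cur = cur->next)
        if (block < cur->mem)
            break;

    /* 3. ...and finally link the new descriptor in. */
    new_mmb->mem = block;
    new_mmb->next = cur;
    if (prev)
        prev->next = new_mmb;
    else
        blocks = new_mmb;
}

int main(void)
{
    record_block((void *)0x2000);
    record_block((void *)0x1000);
    for (struct mmb *m = blocks; m; m = m->next)
        printf("block at %p\n", m->mem);
    return 0;
}

With the old ordering (walk the list first, allocate second), an mmb-heap
extension triggered by the allocation could modify the list behind the saved
prev/cur pointers, which is the corruption the commit message describes.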