lite_malloc.c

#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "lock.h"
#include "syscall.h"
#include "fork_impl.h"

#define ALIGN 16

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */

static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

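/* All bump-allocator state is guarded by this lock. The pointer is
 * exported so that fork() can hold the lock across the fork and leave
 * the allocator in a consistent state in the child (see fork_impl.h). */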
static volatile int lock[1];
volatile int *const __bump_lockptr = lock;

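/* Minimal bump allocator: memory is carved from a contiguous area
 * grown via brk when possible, or from anonymous mmaps otherwise.
 * Nothing is ever freed; this backs malloc only when the full
 * allocator is not linked. */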
static void *__simple_malloc(size_t n)
{
	static uintptr_t brk, cur, end;
	static unsigned mmap_step;
	size_t align=1;
	void *p;

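	/* Reject sizes over SIZE_MAX/2 so the page rounding and pointer
	 * arithmetic below cannot overflow. */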
	if (n > SIZE_MAX/2) {
		errno = ENOMEM;
		return 0;
	}

	if (!n) n++;
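	/* Alignment is the smallest power of two >= n, capped at ALIGN. */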
	while (align<n && align<ALIGN)
		align += align;

	LOCK(lock);

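	/* Round the bump pointer up to the chosen alignment; by C
	 * operator precedence this is cur += (-cur) & (align-1). */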
	cur += -cur & align-1;
	if (n > end-cur) {
		size_t req = n - (end-cur) + PAGE_SIZE-1 & -PAGE_SIZE;

		if (!cur) {
			brk = __syscall(SYS_brk, 0);
			brk += -brk & PAGE_SIZE-1;
			cur = end = brk;
		}

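		/* Try first to extend the current area in place via brk.
		 * This is only possible if the area still ends at the
		 * program break, and is refused if the new break could
		 * run into a stack. */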
		if (brk == end && req < SIZE_MAX-brk
		    && !traverses_stack_p(brk, brk+req)
		    && __syscall(SYS_brk, brk+req)==brk+req) {
			brk = end += req;
		} else {
			int new_area = 0;
			req = n + PAGE_SIZE-1 & -PAGE_SIZE;
			/* Only make a new area rather than individual mmap
			 * if wasted space would be over 1/8 of the map. */
			if (req-n > req/8) {
				/* Geometric area size growth up to 64 pages,
				 * bounding waste by 1/8 of the area. */
				size_t min = PAGE_SIZE<<(mmap_step/2);
				if (min-n > end-cur) {
					if (req < min) {
						req = min;
						if (mmap_step < 12)
							mmap_step++;
					}
					new_area = 1;
				}
			}
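			/* Map fresh anonymous pages. On failure, or when the
			 * mapping is dedicated to this single allocation,
			 * return without touching the bump area. */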
			void *mem = __mmap(0, req, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
			if (mem == MAP_FAILED || !new_area) {
				UNLOCK(lock);
				return mem==MAP_FAILED ? 0 : mem;
			}
			cur = (uintptr_t)mem;
			end = cur + req;
		}
	}

	p = (void *)cur;
	cur += n;
	UNLOCK(lock);
	return p;
}

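/* Weak aliases: when the full malloc implementation is linked, its
 * strong definition of __libc_malloc_impl takes precedence, and the
 * weak malloc alias likewise lets application code replace malloc. */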
weak_alias(__simple_malloc, __libc_malloc_impl);

void *__libc_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

static void *default_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

weak_alias(default_malloc, malloc);