  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }
  if (ar_ptr != NULL)
    (void) mutex_unlock (&ar_ptr->mutex);
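The shape of that retry is worth isolating: allocate from the current arena, and only if that fails while we actually hold a usable arena, pick another arena and try exactly once more, finally unlocking whichever arena we ended up with. Below is a toy illustration of the same pattern; the two-arena setup and the arena_alloc helper are invented for the sketch and are not glibc's real arena machinery.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in: arena 0 is pretended to be exhausted, so the
   first allocation attempt fails and the retry path is exercised.  */
static void *
arena_alloc (int arena_id, size_t bytes)
{
  if (arena_id == 0)
    return NULL;               /* simulate an exhausted arena */
  return malloc (bytes);
}

int
main (void)
{
  size_t bytes = 64;
  int arena = 0;
  void *victim = arena_alloc (arena, bytes);

  /* Same shape as __libc_malloc above: retry exactly once with
     another arena, but only because we held a usable arena before.  */
  if (victim == NULL)
    {
      arena = 1;               /* plays the role of arena_get_retry */
      victim = arena_alloc (arena, bytes);
    }

  printf ("allocated from arena %d: %p\n", arena, victim);
  free (victim);
  return 0;
}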
  /* Little security check which won't hurt performance: the allocator
     never wraps around at the end of the address space.  Therefore
     we can exclude some size values which might appear here by
     accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
      || __builtin_expect (misaligned_chunk (oldp), 0))
    {
      malloc_printerr (check_action, "realloc(): invalid pointer",
                       oldmem, ar_ptr);
      return NULL;
    }
checked_request2size (bytes, nb);
  if (chunk_is_mmapped (oldp))
    {
      void *newmem;
#if HAVE_MREMAP
      newp = mremap_chunk (oldp, nb);
      if (newp)
        return chunk2mem (newp);
#endif
      /* Note the extra SIZE_SZ overhead.  */
      if (oldsize - SIZE_SZ >= nb)
        return oldmem;                         /* do nothing */
      /* Must alloc, copy, free.  */
      newmem = __libc_malloc (bytes);
      if (newmem == 0)
        return 0;              /* propagate failure */
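The wrap-around test in the pointer check above rewards a worked example. On an N-bit machine, (uintptr_t) -oldsize equals 2^N - oldsize, so oldp > (uintptr_t) -oldsize holds exactly when oldp + oldsize would overflow past the top of the address space. Here is a minimal, self-contained sketch of the same predicate; the wraps_around helper is ours, not glibc's.

#include <stdint.h>
#include <stdio.h>

/* Nonzero iff p + sz wraps around the top of the address space,
   using the same arithmetic as the realloc check: for sz > 0,
   p + sz overflows exactly when p > (uintptr_t) -sz.  */
static int
wraps_around (uintptr_t p, uintptr_t sz)
{
  return p > (uintptr_t) -sz;
}

int
main (void)
{
  /* A plausible heap pointer with a sane size: no wrap.  */
  printf ("%d\n", wraps_around (0x55550000, 0x1000));          /* 0 */

  /* A pointer near the top of memory with a forged huge size:
     p + sz would wrap, so the check fires.  */
  printf ("%d\n", wraps_around ((uintptr_t) -0x100, 0x1000));  /* 1 */
  return 0;
}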
/* Call all functions registered with `atexit' and `on_exit',
   in the reverse of the order in which they were registered,
   perform stdio cleanup, and terminate program execution with STATUS.  */
void
attribute_hidden
__run_exit_handlers (int status, struct exit_function_list **listp,
                     bool run_list_atexit)
{
  /* First, call the TLS destructors.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    __call_tls_dtors ();
  /* We do it this way to handle recursive calls to exit () made by
     the functions registered with `atexit' and `on_exit'.  We call
     everyone on the list and use the status value in the last
     exit ().  */
  while (*listp != NULL)
    {
      struct exit_function_list *cur = *listp;
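The "recursive calls to exit ()" case the comment describes is easy to demonstrate. ISO C leaves calling exit () from an exit handler undefined, but the loop above simply keeps consuming *listp, so the remaining handlers still run and the status of the innermost exit () is the one the process reports. A small demonstration of that glibc behavior (per the comment above; not portable C):

#include <stdio.h>
#include <stdlib.h>

static void first (void)  { puts ("registered first, runs last"); }
static void second (void) { puts ("registered second, runs second"); }

/* Calls exit () again from inside a handler: draining of the list
   continues where it left off, and status 7 wins.  */
static void
third (void)
{
  puts ("registered third, runs first; calling exit (7)");
  exit (7);
}

int
main (void)
{
  atexit (first);
  atexit (second);
  atexit (third);
  return 0;   /* handlers run in reverse order; final status is 7 */
}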
void
internal_function
_dl_fini (void)
{
  /* Lots of fun ahead.  We have to call the destructors for all still
     loaded objects, in all namespaces.  The problem is that the ELF
     specification now demands that dependencies between the modules
     are taken into account.  I.e., the destructor for a module is
     called before the ones for any of its dependencies.

     To make things more complicated, we cannot simply use the reverse
     order of the constructors.  Since the user might have loaded objects
     using `dlopen' there are possibly several other modules with its
     dependencies to be taken into account.  Therefore we have to start
     determining the order of the modules once again from the
     beginning.  */
  /* We run the destructors of the main namespace last.  As for the
     other namespaces, we run the destructors in them in reverse
     order of the namespace ID.  */
#ifdef SHARED
  int do_audit = 0;
 again:
#endif
  for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
    {
      /* Protect against concurrent loads and unloads.  */
      __rtld_lock_lock_recursive (GL(dl_load_lock));
      unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
      /* No need to do anything for empty namespaces or those used for
         auditing DSOs.  */
      if (nloaded == 0
#ifdef SHARED
          || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
#endif
          )
        __rtld_lock_unlock_recursive (GL(dl_load_lock));
      else
        {
          /* Now we can allocate an array to hold all the pointers and
             copy the pointers in.  */
          struct link_map *maps[nloaded];
          unsigned int i;
          struct link_map *l;

          assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
          for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
            /* Do not handle ld.so in secondary namespaces.  */
            if (l == l->l_real)
              {
                assert (i < nloaded);
                maps[i] = l;
                l->l_idx = i;
                ++i;
                /* Bump l_direct_opencount of all objects so that they
                   are not dlclose()ed from underneath us.  */
                ++l->l_direct_opencount;
              }
          assert (ns != LM_ID_BASE || i == nloaded);
          assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
          unsigned int nmaps = i;
          /* Now we have to do the sorting.  */
          _dl_sort_fini (maps, nmaps, NULL, ns);
          /* We do not rely on the linked list of loaded objects anymore
             from this point on.  We have our own list here (maps).  The
             various members of this list cannot vanish since the open
             count is too high and will be decremented in this loop.  So
             we release the lock so that some code which might be called
             from a destructor can directly or indirectly access the
             lock.  */
          __rtld_lock_unlock_recursive (GL(dl_load_lock));
          /* 'maps' now contains the objects in the right order.  Now
             call the destructors.  We have to process this array from
             the front.  */
          for (i = 0; i < nmaps; ++i)
            {
              struct link_map *l = maps[i];
              if (l->l_init_called)
                {
                  /* Make sure nothing happens if we are called twice.  */
                  l->l_init_called = 0;
                  /* Is there a destructor function?  */
                  if (l->l_info[DT_FINI_ARRAY] != NULL
                      || l->l_info[DT_FINI] != NULL)
                    {
                      /* When debugging print a message first.  */
                      if (__builtin_expect (GLRO(dl_debug_mask)
                                            & DL_DEBUG_IMPCALLS, 0))
                        _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
                                          DSO_FILENAME (l->l_name), ns);
                      /* First see whether an array is given.  */
                      if (l->l_info[DT_FINI_ARRAY] != NULL)
                        {
                          ElfW(Addr) *array =
                            (ElfW(Addr) *) (l->l_addr
                                            + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
                          unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
                                            / sizeof (ElfW(Addr)));
                          while (i-- > 0)
                            ((fini_t) array[i]) ();
                        }
                      /* Next try the old-style destructor.  */
                      if (l->l_info[DT_FINI] != NULL)
                        DL_CALL_DT_FINI
                          (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr);
                    }
#ifdef SHARED
                  /* Auditing checkpoint: another object closed.  */
                  if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
                    {
                      struct audit_ifaces *afct = GLRO(dl_audit);
                      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
                        {
                          if (afct->objclose != NULL)
                            /* Return value is ignored.  */
                            (void) afct->objclose (&l->l_audit[cnt].cookie);
                          afct = afct->next;
                        }
                    }
#endif
                }
              /* Correct the previous increment.  */
              --l->l_direct_opencount;
            }
        }
    }
#ifdef SHARED
  if (!do_audit && GLRO(dl_naudit) > 0)
    {
      do_audit = 1;
      goto again;
    }

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
    _dl_debug_printf ("\nruntime linker statistics:\n"
                      "   final number of relocations: %lu\n"
                      "final number of relocations from cache: %lu\n",
                      GL(dl_num_relocations),
                      GL(dl_num_cache_relocations));
#endif
}
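For completeness, here is where DT_FINI_ARRAY entries come from on the toolchain side: functions marked with GCC's destructor attribute are emitted into .fini_array, and the while (i-- > 0) loop above walks that array backwards at process shutdown. A minimal sketch follows; the relative order of multiple destructors across translation units is a link-order and toolchain detail, so this example keeps to a single one.

/* Build with: gcc dtor-demo.c -o dtor-demo  */
#include <stdio.h>

/* Placed in .fini_array; reached through the process's DT_FINI_ARRAY
   handling after main returns.  */
__attribute__ ((destructor))
static void
byebye (void)
{
  puts ("byebye: called via .fini_array after main returns");
}

int
main (void)
{
  puts ("main returning; destructor follows");
  return 0;
}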