source: trunk/src/emx/include/Attic/cpp/stl_allo.h@18

Last change on this file since 18 was 18, checked in by bird, 23 years ago

Initial revision

  • Property cvs2svn:cvs-rev set to 1.1
  • Property svn:eol-style set to native
  • Property svn:executable set to *
File size: 20.8 KB
/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/* NOTE: This is an internal header file, included by other STL headers.
 * You should not attempt to use it directly.
 */

#ifndef __SGI_STL_INTERNAL_ALLOC_H
#define __SGI_STL_INTERNAL_ALLOC_H

#ifdef __SUNPRO_CC
#  define __PRIVATE public
   // Extra access restrictions prevent us from really making some things
   // private.
#else
#  define __PRIVATE private
#endif

#ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
#  define __USE_MALLOC
#endif


// This implements some standard node allocators.  These are
// NOT the same as the allocators in the C++ draft standard or in
// the original STL.  They do not encapsulate different pointer
// types; indeed we assume that there is only one pointer type.
// The allocation primitives are intended to allocate individual objects,
// not larger arenas as with the original STL allocators.

#if 0
#   include <new>
#   define __THROW_BAD_ALLOC throw bad_alloc
#elif !defined(__THROW_BAD_ALLOC)
#   include <iostream.h>
#   define __THROW_BAD_ALLOC cerr << "out of memory" << endl; exit(1)
#endif

#ifndef __ALLOC
#   define __ALLOC alloc
#endif
#ifdef __STL_WIN32THREADS
#   include <windows.h>
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#ifndef __RESTRICT
#   define __RESTRICT
#endif

#if !defined(__STL_PTHREADS) && !defined(_NOTHREADS) \
 && !defined(__STL_SGI_THREADS) && !defined(__STL_WIN32THREADS)
#   define _NOTHREADS
#endif

# ifdef __STL_PTHREADS
    // POSIX Threads
    // This is dubious, since this is likely to be a high contention
    // lock.  Performance may not be adequate.
#   include <pthread.h>
#   define __NODE_ALLOCATOR_LOCK \
        if (threads) pthread_mutex_lock(&__node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        if (threads) pthread_mutex_unlock(&__node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // Needed at -O3 on SGI
# endif
# ifdef __STL_WIN32THREADS
    // The lock needs to be initialized by constructing an allocator
    // object of the right type.  We do that here explicitly for alloc.
#   define __NODE_ALLOCATOR_LOCK \
        EnterCriticalSection(&__node_allocator_lock)
#   define __NODE_ALLOCATOR_UNLOCK \
        LeaveCriticalSection(&__node_allocator_lock)
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // may not be needed
# endif /* WIN32THREADS */
# ifdef __STL_SGI_THREADS
    // This should work without threads, with sproc threads, or with
    // pthreads.  It is suboptimal in all cases.
    // It is unlikely to even compile on nonSGI machines.

    extern "C" {
      extern int __us_rsthread_malloc;
    }
    // The above is copied from malloc.h.  Including <malloc.h>
    // would be cleaner but fails with certain levels of standard
    // conformance.
#   define __NODE_ALLOCATOR_LOCK if (threads && __us_rsthread_malloc) \
        { __lock(&__node_allocator_lock); }
#   define __NODE_ALLOCATOR_UNLOCK if (threads && __us_rsthread_malloc) \
        { __unlock(&__node_allocator_lock); }
#   define __NODE_ALLOCATOR_THREADS true
#   define __VOLATILE volatile  // Needed at -O3 on SGI
# endif
# ifdef _NOTHREADS
//  Thread-unsafe
#   define __NODE_ALLOCATOR_LOCK
#   define __NODE_ALLOCATOR_UNLOCK
#   define __NODE_ALLOCATOR_THREADS false
#   define __VOLATILE
# endif

__STL_BEGIN_NAMESPACE

#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma set woff 1174
#endif

// Malloc-based allocator.  Typically slower than default alloc below.
// Typically thread-safe and more storage efficient.
#ifdef __STL_STATIC_TEMPLATE_MEMBER_BUG
# ifdef __DECLARE_GLOBALS_HERE
    void (* __malloc_alloc_oom_handler)() = 0;
    // g++ 2.7.2 does not handle static template data members.
# else
    extern void (* __malloc_alloc_oom_handler)();
# endif
#endif

template <int inst>
class __malloc_alloc_template {

private:

static void *oom_malloc(size_t);

static void *oom_realloc(void *, size_t);

#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
    static void (* __malloc_alloc_oom_handler)();
#endif

public:

static void * allocate(size_t n)
{
    void *result = malloc(n);
    if (0 == result) result = oom_malloc(n);
    return result;
}

static void deallocate(void *p, size_t /* n */)
{
    free(p);
}

static void * reallocate(void *p, size_t /* old_sz */, size_t new_sz)
{
    void * result = realloc(p, new_sz);
    if (0 == result) result = oom_realloc(p, new_sz);
    return result;
}

static void (* set_malloc_handler(void (*f)()))()
{
    void (* old)() = __malloc_alloc_oom_handler;
    __malloc_alloc_oom_handler = f;
    return(old);
}

};

// malloc_alloc out-of-memory handling

#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
template <int inst>
void (* __malloc_alloc_template<inst>::__malloc_alloc_oom_handler)() = 0;
#endif

template <int inst>
void * __malloc_alloc_template<inst>::oom_malloc(size_t n)
{
    void (* my_malloc_handler)();
    void *result;

    for (;;) {
        my_malloc_handler = __malloc_alloc_oom_handler;
        if (0 == my_malloc_handler) { __THROW_BAD_ALLOC; }
        (*my_malloc_handler)();
        result = malloc(n);
        if (result) return(result);
    }
}

template <int inst>
void * __malloc_alloc_template<inst>::oom_realloc(void *p, size_t n)
{
    void (* my_malloc_handler)();
    void *result;

    for (;;) {
        my_malloc_handler = __malloc_alloc_oom_handler;
        if (0 == my_malloc_handler) { __THROW_BAD_ALLOC; }
        (*my_malloc_handler)();
        result = realloc(p, n);
        if (result) return(result);
    }
}

typedef __malloc_alloc_template<0> malloc_alloc;

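// Illustrative sketch (not part of the original header): the out-of-memory
// handler follows the standard new_handler protocol.  A client might install
// a handler that frees a reserve buffer so the retry loop in oom_malloc can
// succeed.  The names reserve and release_reserve below are hypothetical.
#if 0
static char* reserve = (char*) malloc(1 << 20);   // emergency reserve

static void release_reserve()
{
    if (reserve) { free(reserve); reserve = 0; }  // return memory, then retry
    else { cerr << "out of memory" << endl; exit(1); }  // nothing left to free
}

void client_code()
{
    malloc_alloc::set_malloc_handler(release_reserve);
    void* p = malloc_alloc::allocate(4096);       // retries via the handler
    malloc_alloc::deallocate(p, 4096);
}
#endif
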
template<class T, class Alloc>
class simple_alloc {

public:
    static T *allocate(size_t n)
                { return 0 == n? 0 : (T*) Alloc::allocate(n * sizeof (T)); }
    static T *allocate(void)
                { return (T*) Alloc::allocate(sizeof (T)); }
    static void deallocate(T *p, size_t n)
                { if (0 != n) Alloc::deallocate(p, n * sizeof (T)); }
    static void deallocate(T *p)
                { Alloc::deallocate(p, sizeof (T)); }
};
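
// Illustrative sketch (not part of the original header): simple_alloc is the
// adaptor the SGI containers use internally; it converts an element count
// into a byte count for the underlying byte-oriented allocator.
#if 0
typedef simple_alloc<int, __ALLOC> int_alloc;     // __ALLOC defaults to alloc

void client_code()
{
    int* a = int_alloc::allocate(10);             // 10 * sizeof(int) bytes
    for (int i = 0; i < 10; i++) a[i] = i;
    int_alloc::deallocate(a, 10);                 // size must match the request
}
#endif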

// Allocator adaptor to check size arguments for debugging.
// Reports errors using assert.  Checking can be disabled with
// NDEBUG, but it's far better to just use the underlying allocator
// instead when no checking is desired.
// There is some evidence that this can confuse Purify.
template <class Alloc>
class debug_alloc {

private:

enum {extra = 8};       // Size of space used to store size.  Note
                        // that this must be large enough to preserve
                        // alignment.

public:

static void * allocate(size_t n)
{
    char *result = (char *)Alloc::allocate(n + extra);
    *(size_t *)result = n;
    return result + extra;
}

static void deallocate(void *p, size_t n)
{
    char * real_p = (char *)p - extra;
    assert(*(size_t *)real_p == n);
    Alloc::deallocate(real_p, n + extra);
}

static void * reallocate(void *p, size_t old_sz, size_t new_sz)
{
    char * real_p = (char *)p - extra;
    assert(*(size_t *)real_p == old_sz);
    char * result = (char *)
                  Alloc::reallocate(real_p, old_sz + extra, new_sz + extra);
    *(size_t *)result = new_sz;
    return result + extra;
}


};
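
// Illustrative sketch (not part of the original header): debug_alloc prefixes
// each allocation with 8 bytes holding the requested size, so a mismatched
// deallocate trips the assert instead of silently corrupting a free list.
#if 0
typedef debug_alloc<malloc_alloc> checked_alloc;

void client_code()
{
    void* p = checked_alloc::allocate(100);
    checked_alloc::deallocate(p, 100);            // OK: sizes match
    void* q = checked_alloc::allocate(100);
    checked_alloc::deallocate(q, 50);             // asserts: 50 != 100
}
#endif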


# ifdef __USE_MALLOC

typedef malloc_alloc alloc;
typedef malloc_alloc single_client_alloc;

# else


// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
// Default_alloc_template parameters are experimental and MAY
// DISAPPEAR in the future.  Clients should just use alloc for now.
//
// Important implementation properties:
// 1. If the client requests an object of size > __MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    ROUND_UP(requested_size).  Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//

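// Worked example (added for clarity, not in the original): with __ALIGN == 8
// and __MAX_BYTES == 128, a request for 13 bytes is served from the 16-byte
// free list, since ROUND_UP(13) == 16 and FREELIST_INDEX(13) == 1; a request
// for 200 bytes goes straight to malloc_alloc.
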
// The first template parameter specifies whether more than one thread
// may use this allocator.  It is safe to allocate an object from
// one instance of a default_alloc and deallocate it with another
// one.  This effectively transfers its ownership to the second one.
// This may have undesirable effects on reference locality.
// The second parameter is unreferenced and serves only to allow the
// creation of multiple default_alloc instances.
// Note that containers built on different allocator instances have
// different types, limiting the utility of this approach.
#ifdef __SUNPRO_CC
// breaks if we make these template class members:
  enum {__ALIGN = 8};
  enum {__MAX_BYTES = 128};
  enum {__NFREELISTS = __MAX_BYTES/__ALIGN};
#endif

template <bool threads, int inst>
class __default_alloc_template {

private:
  // Really we should use static const int x = N
  // instead of enum { x = N }, but few compilers accept the former.
# ifndef __SUNPRO_CC
    enum {__ALIGN = 8};
    enum {__MAX_BYTES = 128};
    enum {__NFREELISTS = __MAX_BYTES/__ALIGN};
# endif
  static size_t ROUND_UP(size_t bytes) {
        return (((bytes) + __ALIGN-1) & ~(__ALIGN - 1));
  }
__PRIVATE:
  union obj {
        union obj * free_list_link;
        char client_data[1];    /* The client sees this. */
  };
private:
# ifdef __SUNPRO_CC
    static obj * __VOLATILE free_list[];
        // Specifying a size results in duplicate def for 4.1
# else
    static obj * __VOLATILE free_list[__NFREELISTS];
# endif
  static size_t FREELIST_INDEX(size_t bytes) {
        return (((bytes) + __ALIGN-1)/__ALIGN - 1);
  }

  // Returns an object of size n, and optionally adds to size n free list.
  static void *refill(size_t n);
  // Allocates a chunk for nobjs of size size.  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char *chunk_alloc(size_t size, int &nobjs);

  // Chunk allocation state.
  static char *start_free;
  static char *end_free;
  static size_t heap_size;

# ifdef __STL_SGI_THREADS
    static volatile unsigned long __node_allocator_lock;
    static void __lock(volatile unsigned long *);
    static inline void __unlock(volatile unsigned long *);
# endif

# ifdef __STL_PTHREADS
    static pthread_mutex_t __node_allocator_lock;
# endif

# ifdef __STL_WIN32THREADS
    static CRITICAL_SECTION __node_allocator_lock;
    static bool __node_allocator_lock_initialized;

  public:
    __default_alloc_template() {
        // This assumes the first constructor is called before threads
        // are started.
        if (!__node_allocator_lock_initialized) {
            InitializeCriticalSection(&__node_allocator_lock);
            __node_allocator_lock_initialized = true;
        }
    }
  private:
# endif

    class lock {
        public:
            lock() { __NODE_ALLOCATOR_LOCK; }
            ~lock() { __NODE_ALLOCATOR_UNLOCK; }
    };
    friend class lock;

public:

  /* n must be > 0 */
  static void * allocate(size_t n)
  {
    obj * __VOLATILE * my_free_list;
    obj * __RESTRICT result;

    if (n > (size_t) __MAX_BYTES) {
        return(malloc_alloc::allocate(n));
    }
    my_free_list = free_list + FREELIST_INDEX(n);
    // Acquire the lock here with a constructor call.
    // This ensures that it is released on exit or during stack
    // unwinding.
#       ifndef _NOTHREADS
        /*REFERENCED*/
        lock lock_instance;
#       endif
    result = *my_free_list;
    if (result == 0) {
        void *r = refill(ROUND_UP(n));
        return r;
    }
    *my_free_list = result -> free_list_link;
    return (result);
  }

  /* p may not be 0 */
  static void deallocate(void *p, size_t n)
  {
    obj *q = (obj *)p;
    obj * __VOLATILE * my_free_list;

    if (n > (size_t) __MAX_BYTES) {
        malloc_alloc::deallocate(p, n);
        return;
    }
    my_free_list = free_list + FREELIST_INDEX(n);
    // acquire lock
#       ifndef _NOTHREADS
        /*REFERENCED*/
        lock lock_instance;
#       endif /* _NOTHREADS */
    q -> free_list_link = *my_free_list;
    *my_free_list = q;
    // lock is released here
  }

  static void * reallocate(void *p, size_t old_sz, size_t new_sz);

};

typedef __default_alloc_template<__NODE_ALLOCATOR_THREADS, 0> alloc;
typedef __default_alloc_template<false, 0> single_client_alloc;
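
// Illustrative sketch (not part of the original header): small requests are
// served from per-size free lists; anything over __MAX_BYTES (128 bytes)
// falls through to malloc_alloc.  Callers must pass the same size to
// deallocate that they passed to allocate.
#if 0
void client_code()
{
    void* small = alloc::allocate(24);            // from the 24-byte free list
    void* large = alloc::allocate(4096);          // > 128: straight from malloc
    alloc::deallocate(small, 24);                 // node goes back on free list
    alloc::deallocate(large, 4096);               // forwarded to free()
}
#endif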



/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
template <bool threads, int inst>
char*
__default_alloc_template<threads, inst>::chunk_alloc(size_t size, int& nobjs)
{
    char * result;
    size_t total_bytes = size * nobjs;
    size_t bytes_left = end_free - start_free;

    if (bytes_left >= total_bytes) {
        result = start_free;
        start_free += total_bytes;
        return(result);
    } else if (bytes_left >= size) {
        nobjs = bytes_left/size;
        total_bytes = size * nobjs;
        result = start_free;
        start_free += total_bytes;
        return(result);
    } else {
        size_t bytes_to_get = 2 * total_bytes + ROUND_UP(heap_size >> 4);
        // Try to make use of the left-over piece.
        if (bytes_left > 0) {
            obj * __VOLATILE * my_free_list =
                        free_list + FREELIST_INDEX(bytes_left);

            ((obj *)start_free) -> free_list_link = *my_free_list;
            *my_free_list = (obj *)start_free;
        }
        start_free = (char *)malloc(bytes_to_get);
        if (0 == start_free) {
            int i;
            obj * __VOLATILE * my_free_list, *p;
            // Try to make do with what we have.  That can't
            // hurt.  We do not try smaller requests, since that tends
            // to result in disaster on multi-process machines.
            for (i = size; i <= __MAX_BYTES; i += __ALIGN) {
                my_free_list = free_list + FREELIST_INDEX(i);
                p = *my_free_list;
                if (0 != p) {
                    *my_free_list = p -> free_list_link;
                    start_free = (char *)p;
                    end_free = start_free + i;
                    return(chunk_alloc(size, nobjs));
                    // Any leftover piece will eventually make it to the
                    // right free list.
                }
            }
            end_free = 0;       // In case of exception.
            start_free = (char *)malloc_alloc::allocate(bytes_to_get);
            // This should either throw an
            // exception or remedy the situation.  Thus we assume it
            // succeeded.
        }
        heap_size += bytes_to_get;
        end_free = start_free + bytes_to_get;
        return(chunk_alloc(size, nobjs));
    }
}
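
// Worked example (added for clarity, not in the original): on the first
// 32-byte refill, nobjs == 20, so chunk_alloc asks malloc for
// 2 * 640 + ROUND_UP(0 >> 4) == 1280 bytes.  It hands back 640 bytes for the
// twenty objects and keeps the remaining 640 in [start_free, end_free) to
// satisfy later requests before touching malloc again.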


/* Returns an object of size n, and optionally adds to size n free list.*/
/* We assume that n is properly aligned.                                */
/* We hold the allocation lock.                                         */
template <bool threads, int inst>
void* __default_alloc_template<threads, inst>::refill(size_t n)
{
    int nobjs = 20;
    char * chunk = chunk_alloc(n, nobjs);
    obj * __VOLATILE * my_free_list;
    obj * result;
    obj * current_obj, * next_obj;
    int i;

    if (1 == nobjs) return(chunk);
    my_free_list = free_list + FREELIST_INDEX(n);

    /* Build free list in chunk */
      result = (obj *)chunk;
      *my_free_list = next_obj = (obj *)(chunk + n);
      for (i = 1; ; i++) {
        current_obj = next_obj;
        next_obj = (obj *)((char *)next_obj + n);
        if (nobjs - 1 == i) {
            current_obj -> free_list_link = 0;
            break;
        } else {
            current_obj -> free_list_link = next_obj;
        }
      }
    return(result);
}
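
// Worked example (added for clarity, not in the original): if chunk_alloc
// returns twenty 32-byte objects, refill hands the first one to the caller
// and threads the other nineteen onto free_list[FREELIST_INDEX(32)], i.e.
// free_list[3], as a singly linked list terminated by a null free_list_link.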

template <bool threads, int inst>
void*
__default_alloc_template<threads, inst>::reallocate(void *p,
                                                    size_t old_sz,
                                                    size_t new_sz)
{
    void * result;
    size_t copy_sz;

    if (old_sz > (size_t) __MAX_BYTES && new_sz > (size_t) __MAX_BYTES) {
        return(realloc(p, new_sz));
    }
    if (ROUND_UP(old_sz) == ROUND_UP(new_sz)) return(p);
    result = allocate(new_sz);
    copy_sz = new_sz > old_sz? old_sz : new_sz;
    memcpy(result, p, copy_sz);
    deallocate(p, old_sz);
    return(result);
}

#ifdef __STL_PTHREADS
    template <bool threads, int inst>
    pthread_mutex_t
    __default_alloc_template<threads, inst>::__node_allocator_lock
        = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef __STL_WIN32THREADS
    template <bool threads, int inst> CRITICAL_SECTION
    __default_alloc_template<threads, inst>::__node_allocator_lock;

    template <bool threads, int inst> bool
    __default_alloc_template<threads, inst>::__node_allocator_lock_initialized
        = false;
#endif

#ifdef __STL_SGI_THREADS
__STL_END_NAMESPACE
#include <mutex.h>
#include <time.h>
__STL_BEGIN_NAMESPACE
// Somewhat generic lock implementations.  We need only test-and-set
// and some way to sleep.  These should work with both SGI pthreads
// and sproc threads.  They may be useful on other systems.
template <bool threads, int inst>
volatile unsigned long
__default_alloc_template<threads, inst>::__node_allocator_lock = 0;

#if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) || defined(__GNUC__)
#   define __test_and_set(l,v) test_and_set(l,v)
#endif

template <bool threads, int inst>
void
__default_alloc_template<threads, inst>::__lock(volatile unsigned long *lock)
{
    const unsigned low_spin_max = 30;    // spin cycles if we suspect uniprocessor
    const unsigned high_spin_max = 1000; // spin cycles for multiprocessor
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    static struct timespec ts = {0, 1000};
    unsigned junk;
#   define __ALLOC_PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
    int i;

    if (!__test_and_set((unsigned long *)lock, 1)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (i < my_last_spins/2 || *lock) {
            __ALLOC_PAUSE;
            continue;
        }
        if (!__test_and_set((unsigned long *)lock, 1)) {
            // got it!
            // Spinning worked.  Thus we're probably not being scheduled
            // against the other process with which we were contending.
            // Thus it makes sense to spin longer the next time.
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    // We are probably being scheduled against the other process.  Sleep.
    spin_max = low_spin_max;
    for (;;) {
        if (!__test_and_set((unsigned long *)lock, 1)) {
            return;
        }
        nanosleep(&ts, 0);
    }
}

template <bool threads, int inst>
inline void
__default_alloc_template<threads, inst>::__unlock(volatile unsigned long *lock)
{
#   if defined(__GNUC__) && __mips >= 3
        asm("sync");
        *lock = 0;
#   elif __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
        __lock_release(lock);
#   else
        *lock = 0;
        // This is not sufficient on many multiprocessors, since
        // writes to protected variables and the lock may be reordered.
#   endif
}
#endif

template <bool threads, int inst>
char *__default_alloc_template<threads, inst>::start_free = 0;

template <bool threads, int inst>
char *__default_alloc_template<threads, inst>::end_free = 0;

template <bool threads, int inst>
size_t __default_alloc_template<threads, inst>::heap_size = 0;

template <bool threads, int inst>
__default_alloc_template<threads, inst>::obj * __VOLATILE
__default_alloc_template<threads, inst> ::free_list[
# ifdef __SUNPRO_CC
    __NFREELISTS
# else
    __default_alloc_template<threads, inst>::__NFREELISTS
# endif
] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
// space for the array.

# ifdef __STL_WIN32THREADS
    // Create one to get critical section initialized.
    // We do this once per file, but only the first constructor
    // does anything.
    static alloc __node_allocator_dummy_instance;
# endif

#endif /* ! __USE_MALLOC */

#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma reset woff 1174
#endif

__STL_END_NAMESPACE

#undef __PRIVATE

#endif /* __SGI_STL_INTERNAL_ALLOC_H */

// Local Variables:
// mode:C++
// End: