Changeset 592 for trunk/src/3rdparty/javascriptcore

- Timestamp: Feb 23, 2010, 12:37:34 AM
- Location: trunk/src/3rdparty/javascriptcore/JavaScriptCore
- Files: 9 edited
trunk/src/3rdparty/javascriptcore/JavaScriptCore/interpreter/RegisterFile.cpp
r561 → r592. The bodies of the lines added in r592 are blank in this capture; only their position is visible:

- Destructor teardown (around lines 37-48): lines are added inside the HAVE(VIRTUALALLOC) branch, before the "#if PLATFORM(WINCE)" decommit ("VirtualFree(m_buffer, DWORD(m_commitEnd) - DWORD(m_buffer), MEM_DECOMMIT);") and after "VirtualFree(m_buffer, 0, MEM_RELEASE);". The munmap() path and the fastFree(m_buffer) fallback are unchanged context.
- Shrink/release path (around lines 51-65): lines are added inside the HAVE(VIRTUALALLOC) branch, before "VirtualFree(m_start, (m_max - m_start) * sizeof(Register), MEM_DECOMMIT);" and after "m_commitEnd = m_start;". The madvise(MADV_FREE) retry loop and "m_maxUsed = m_start;" are unchanged context.
trunk/src/3rdparty/javascriptcore/JavaScriptCore/interpreter/RegisterFile.h
r561 → r592. The bodies of the added lines are blank in this capture:

- Top of the file: five new lines are inserted after the last "#endif" of the include block, just before "namespace JSC {".
- Constructor, HAVE(VIRTUALALLOC) branch: a block of new lines is inserted directly after "#elif HAVE(VIRTUALALLOC)" and before the existing reservation call "m_buffer = static_cast<Register*>(VirtualAlloc(0, roundUpAllocationSize(bufferLength, commitSize), MEM_RESERVE, PAGE_READWRITE));". A matching "#endif" is added before the existing "#else" fallback that begins the "If neither MMAP nor VIRTUALALLOC are available - use fastMalloc instead." comment.
- grow(): inside "#if !HAVE(MMAP) && HAVE(VIRTUALALLOC)", new lines are added before the existing commit-on-demand path ("if (newEnd > m_commitEnd) { size_t size = roundUpAllocationSize(...); ... }") and one more after its closing "#endif".
- The remaining hunks (the grow()/shrink() declarations, setNumGlobals()/numGlobals(), lastGlobal(), markGlobals()/markCallFrames()) are unchanged context.
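
For readers unfamiliar with the reserve/commit scheme these RegisterFile hunks revolve around, here is a minimal, self-contained sketch of the pattern: reserve a large address range up front with VirtualAlloc(MEM_RESERVE), commit pages lazily as the buffer grows, decommit them when it shrinks. This is an illustration only; the class and method names (GrowableBuffer, growTo) are made up, not the patched Qt code.

    #include <windows.h>
    #include <cstddef>

    // Sketch of reserve-then-commit-on-demand, as used when HAVE(VIRTUALALLOC) is set.
    class GrowableBuffer {
    public:
        GrowableBuffer(size_t maxBytes, size_t commitChunk)
            : m_commitChunk(commitChunk), m_committed(0), m_max(maxBytes)
        {
            // Reserve address space only; no physical pages are committed yet.
            m_base = static_cast<char*>(VirtualAlloc(0, maxBytes, MEM_RESERVE, PAGE_READWRITE));
        }

        ~GrowableBuffer()
        {
            if (!m_base)
                return;
            if (m_committed)
                VirtualFree(m_base, m_committed, MEM_DECOMMIT); // explicit decommit, WinCE-style
            VirtualFree(m_base, 0, MEM_RELEASE);                // release the whole reservation
        }

        bool growTo(size_t bytes)
        {
            if (!m_base || bytes > m_max)
                return false;
            if (bytes > m_committed) {
                // Round the commit up to a whole chunk, then commit only the new range.
                size_t target = ((bytes + m_commitChunk - 1) / m_commitChunk) * m_commitChunk;
                if (target > m_max)
                    target = m_max;
                if (!VirtualAlloc(m_base + m_committed, target - m_committed, MEM_COMMIT, PAGE_READWRITE))
                    return false;
                m_committed = target;
            }
            return true;
        }

        void shrink()
        {
            // Give the physical pages back to the OS but keep the address range reserved.
            if (m_base && m_committed)
                VirtualFree(m_base, m_committed, MEM_DECOMMIT);
            m_committed = 0;
        }

    private:
        char* m_base;
        size_t m_commitChunk;
        size_t m_committed;
        size_t m_max;
    };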
trunk/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
r561 → r592. One hunk, around lines 68-72: the line "#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)" above "#define SYMBOL_STRING(name) "_" #name" is modified, with no visible difference other than whitespace.
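
As a side note on the macro this hunk touches: on platforms whose C ABI prefixes symbols with an underscore (Darwin, Windows), the assembly-level name of a C function is "_" plus its C name, so SYMBOL_STRING stringizes the name and prepends the underscore only where needed. A standalone illustration, using plain compiler macros instead of the WTF PLATFORM() checks:

    #include <cstdio>

    // Illustration of the SYMBOL_STRING idea; the #else branch here is an
    // assumption about the non-underscore platforms, not quoted from the diff.
    #if defined(__APPLE__) || defined(_WIN32)
    #define SYMBOL_STRING(name) "_" #name
    #else
    #define SYMBOL_STRING(name) #name
    #endif

    int main()
    {
        // Typically used when naming a C function from inline assembly.
        printf("%s\n", SYMBOL_STRING(cti_op_add));   // "_cti_op_add" or "cti_op_add"
        return 0;
    }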
trunk/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp
r561 → r592. New lines (bodies blank in this capture) are added in three places:

- Block allocation (allocateBlock): after "memset(reinterpret_cast<void*>(address), 0, BLOCK_SIZE);" in the mmap/posix_memalign path, before the "#elif PLATFORM(WINCE)" branch that calls "VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE)".
- Block release (freeBlock): after the PLATFORM(SYMBIAN) case "userChunk->Free(reinterpret_cast<TAny*>(block));", before the "#elif PLATFORM(WINCE)" branch that calls "VirtualFree(block, 0, MEM_RELEASE)".
- currentThreadStackBase(): after the branch that returns "static_cast<void*>(pTib->StackBase)", before "#elif PLATFORM(QNX)" / "return currentThreadStackBaseQNX();".

The many remaining hunks show no difference other than whitespace: the Heap::Thread constructor, the CollectorHeap memsets in the Heap constructor, the block-array shrink ("heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;"), the extra-cost comment ("NOTE: we target the primaryHeap unconditionally as JSNumber doesn't modify cost"), the conservative marking of globals and the primary heap, getPlatformThreadRegisters() (x86_64/PPC thread-state flavors and the thread_get_state error report), the fastMallocForbid() comment before the mark phase, the whole sweep loop (which pushes dead cells onto the per-block free list with "cell->u.freeCell.zeroIfFree = 0; cell->u.freeCell.next = freeList - (cell + 1); freeList = cell;"), Heap::objectCount(), and addToStatistics().
trunk/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.h
r561 → r592. All visible hunks are whitespace-only: the object-heap iterator declarations next to "static bool isNumber(JSCell*);", the "#if PLATFORM(WINCE) || PLATFORM(SYMBIAN)" selection of "const size_t BLOCK_SIZE = 64 * 1024;", the CollectorBitmap get()/set()/clear() accessors, the CollectorCell/HeapConstants declarations, and Heap::reportExtraMemoryCost(), which calls "recordExtraCost(cost / (CELL_SIZE * 2))" when cost exceeds minExtraCostSize.
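
The CollectorBitmap accessors touched here index bit n as word "n >> 5", position "n & 0x1F". A standalone illustration of that arithmetic; the bitmap size below is made up for the example (the real one is derived from CELLS_PER_BLOCK):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    struct Bitmap {
        uint32_t bits[8]; // 8 words -> 256 bits

        bool get(size_t n) const { return !!(bits[n >> 5] & (1u << (n & 0x1F))); }
        void set(size_t n)       { bits[n >> 5] |= (1u << (n & 0x1F)); }
        void clear(size_t n)     { bits[n >> 5] &= ~(1u << (n & 0x1F)); }
        void clearAll()          { memset(bits, 0, sizeof(bits)); }
    };

    int main()
    {
        Bitmap marked;
        marked.clearAll();
        marked.set(37);                                      // word 1, bit 5
        printf("%d %d\n", marked.get(37), marked.get(38));   // prints "1 0"
        marked.clear(37);
        printf("%d\n", marked.get(37));                      // prints "0"
        return 0;
    }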
trunk/src/3rdparty/javascriptcore/JavaScriptCore/runtime/MarkStackPosix.cpp
r561 → r592. Whitespace-only changes: the last line of the license header and the "#if defined (__SYMBIAN32__)" guards in MarkStack::allocateStack() and MarkStack::releaseStack() are modified with no visible difference other than whitespace. On Symbian those functions fall back to fastMalloc()/fastFree().
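
The non-Symbian branch of these two functions is not shown in the hunk above; on POSIX systems the mark stack is typically backed by anonymous pages. A hedged sketch of what such a branch looks like, with illustrative error handling (an assumption, not the Qt source):

    #include <sys/mman.h>
    #include <cstddef>

    // Sketch: grab anonymous, private, read/write pages for the mark stack and
    // return them with munmap. The Symbian build simply uses fastMalloc/fastFree.
    static void* allocateStack(size_t size)
    {
        void* addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
        return addr == MAP_FAILED ? 0 : addr;
    }

    static void releaseStack(void* addr, size_t size)
    {
        if (addr)
            munmap(addr, size);
    }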
trunk/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp
r561 → r592. Two substantive hunks are visible:

- The platform includes after "#include <stdlib.h>" are restructured so that windows.h comes first and an OS/2 branch is added. Before:

      #if !PLATFORM(WIN_OS)
      #include <pthread.h>
      #else
      #include "windows.h"
      #endif

  After (the opening condition is inferred from the visible lines):

      #if PLATFORM(WIN_OS)
      #include "windows.h"
      #elif PLATFORM(OS2)
      #define INCL_BASE
      #include <os2.h>
      #else
      #include <pthread.h>
      #endif

- New lines (bodies blank in this capture) are added after the sleep helper that calls "::Sleep(seconds * 1000);", before the closing "#endif" around lines 2289-2300.

The many remaining hunks show no difference other than whitespace: the license header, fastZeroedMalloc()/tryFastZeroedMalloc(), tryFastMalloc()/fastMalloc(), releaseFastMallocFreeMemory() and fastMallocStatistics(), the DEFINE_int64/DEFINE_double flag macros, the x86-64 PageMap comment, GetDescriptor()/ReturnedBytes(), the large-span allocation comment, scavenge_index_ and the FastMallocZone friend declaration, TCMalloc_PageHeap::scavenge() and shouldContinueScavenging(), the thread-specific-data setup around tsd_inited, the extern "C" malloc/free/calloc/cfree/realloc wrappers, and calloc()'s overflow guard ("if (n > 1 && elem_size && (totalBytes / elem_size) != n)").
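
The calloc() overflow guard quoted above works because, if "n * elem_size" wraps around, dividing the wrapped product by elem_size no longer yields n, so the request can be rejected instead of silently allocating a too-small buffer. A standalone demonstration (the helper name is illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static bool multiplyWouldOverflow(size_t n, size_t elemSize)
    {
        size_t totalBytes = n * elemSize;                   // may wrap modulo SIZE_MAX + 1
        return n > 1 && elemSize && (totalBytes / elemSize) != n;
    }

    int main()
    {
        printf("%d\n", multiplyWouldOverflow(1000, 16));          // 0: fits
        printf("%d\n", multiplyWouldOverflow(SIZE_MAX / 2, 16));  // 1: product wraps
        return 0;
    }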
trunk/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Platform.h
r561 → r592. New lines (bodies blank in this capture) are added in two places:

- Near the top of the file, after the first "#endif" around line 58, just before the "/* PLATFORM(WINCE) */" operating-system comment block.
- After "#define HAVE_VIRTUALALLOC 1" and before "#elif PLATFORM(SYMBIAN)".

The other two hunks show no difference other than whitespace: the Cairo default ("#if !PLATFORM(MAC) && !PLATFORM(QT) && !PLATFORM(WX) && !PLATFORM(CHROMIUM) && !PLATFORM(WINCE) && !PLATFORM(HAIKU)" followed by "#define WTF_PLATFORM_CAIRO 1") and the guard that defines HAVE_TM_GMTOFF, HAVE_TM_ZONE, and HAVE_TIMEGM.
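
Platform.h is where the PLATFORM()/HAVE() checks used throughout this changeset are switched on. A minimal sketch of the convention: the token-pasting definitions below follow the usual wtf/Platform.h pattern of this period and are illustrative, not quoted from the Qt 4.x source (note that expanding "defined" from a macro is formally implementation-defined, though the major compilers accept it).

    #include <cstdio>

    #define PLATFORM(WTF_FEATURE) (defined WTF_PLATFORM_##WTF_FEATURE && WTF_PLATFORM_##WTF_FEATURE)
    #define HAVE(WTF_FEATURE)     (defined HAVE_##WTF_FEATURE && HAVE_##WTF_FEATURE)

    // A port then turns features on with plain defines, e.g.:
    #define WTF_PLATFORM_CAIRO 1
    #define HAVE_VIRTUALALLOC 1

    int main()
    {
    #if HAVE(VIRTUALALLOC)
        puts("would reserve/commit the register file with VirtualAlloc");
    #elif HAVE(MMAP)
        puts("would use mmap/madvise");
    #else
        puts("would fall back to fastMalloc");
    #endif
        return 0;
    }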
trunk/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.cpp
r561 → r592. New lines (bodies blank in this capture) are added in several places:

- After "#include "windows.h"" in the PLATFORM(WIN_OS) include block at the top of the file.
- In TryVirtualAlloc(): inside the one-time "if (pagesize == 0)" block around the "SYSTEM_INFO system_info; GetSystemInfo(&system_info); pagesize = system_info.dwPageSize;" sequence, around the "VirtualAlloc(NULL, size + extra, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE)" request, and around the two "VirtualFree(..., MEM_DECOMMIT)" calls that trim the unaligned head and tail of the allocation. The comment on the "#endif" that closes this section is also touched (its new text is not legible here).
- In TCMalloc_SystemRelease(), around "if (VirtualFree(start, length, MEM_DECOMMIT)) return;".
- In TCMalloc_SystemCommit(), around "if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start) return;".

The remaining hunks show no difference other than whitespace: the license header, the use_mmap flag, TrySbrk(), TryMmap(), TryDevMem() (the /dev/mem allocator), and TCMalloc_SystemAlloc().
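
TryVirtualAlloc's alignment handling, visible in the hunks above, over-allocates by "alignment - pagesize" so that an aligned start address must exist inside the range, then decommits the unused head and tail (Windows only allows releasing the whole region, so decommit is the best it can do). A minimal sketch of that idea; the function name and error handling are illustrative, not the TCMalloc implementation, and alignment is assumed to be a power of two.

    #include <windows.h>
    #include <cstddef>
    #include <cstdint>

    static void* allocAligned(size_t size, size_t alignment, size_t pagesize)
    {
        // Round the request up to a multiple of the alignment.
        size = ((size + alignment - 1) / alignment) * alignment;

        // Over-allocate only when the alignment is stricter than a page.
        size_t extra = (alignment > pagesize) ? alignment - pagesize : 0;

        char* raw = static_cast<char*>(VirtualAlloc(NULL, size + extra,
                                                    MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                                                    PAGE_READWRITE));
        if (!raw)
            return NULL;

        uintptr_t ptr = reinterpret_cast<uintptr_t>(raw);
        size_t adjust = (ptr & (alignment - 1)) ? alignment - (ptr & (alignment - 1)) : 0;

        // Give the physical pages of the unused head and tail back to the OS.
        if (adjust > 0)
            VirtualFree(raw, adjust, MEM_DECOMMIT);
        if (adjust < extra)
            VirtualFree(raw + adjust + size, extra - adjust, MEM_DECOMMIT);

        return raw + adjust;
    }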