Create a base GLSL front-end from the 3Dlabs glslang front-end from 20-Sep-2005.
git-svn-id: https://cvs.khronos.org/svn/repos/ogl/trunk/ecosystem/public/sdk/tools/glslang@19944 e7fa87d3-cd2b-0410-9028-fcbf551c1848
commit a0af473a8b
80 changed files with 21238 additions and 0 deletions
glslang/MachineIndependent/PoolAlloc.cpp (new file, 342 lines)
@@ -0,0 +1,342 @@
//
//Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
//All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
//
//    Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
//    Redistributions in binary form must reproduce the above
//    copyright notice, this list of conditions and the following
//    disclaimer in the documentation and/or other materials provided
//    with the distribution.
//
//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
//POSSIBILITY OF SUCH DAMAGE.
//

#include "../Include/PoolAlloc.h"
#include "../Include/Common.h"

#include "Include/InitializeGlobals.h"
#include "osinclude.h"

OS_TLSIndex PoolIndex;

void InitializeGlobalPools()
{
    TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
    if (globalPools)
        return;

    TPoolAllocator* globalPoolAllocator = new TPoolAllocator(true);

    TThreadGlobalPools* threadData = new TThreadGlobalPools();

    threadData->globalPoolAllocator = globalPoolAllocator;

    OS_SetTLSValue(PoolIndex, threadData);
    globalPoolAllocator->push();
}

void FreeGlobalPools()
{
    // Release the allocated memory for this thread.
    TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
    if (!globalPools)
        return;

    GlobalPoolAllocator.popAll();
    delete &GlobalPoolAllocator;
    delete globalPools;
}

bool InitializePoolIndex()
{
    // Allocate a TLS index.
    if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
        return false;

    return true;
}

void FreePoolIndex()
{
    // Release the TLS index.
    OS_FreeTLSIndex(PoolIndex);
}

TPoolAllocator& GetGlobalPoolAllocator()
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    return *threadData->globalPoolAllocator;
}

void SetGlobalPoolAllocatorPtr(TPoolAllocator* poolAllocator)
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    threadData->globalPoolAllocator = poolAllocator;
}
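For orientation, a per-thread lifecycle built from the functions above might look like the following. This is an illustrative sketch, not part of the commit; it assumes the declarations in PoolAlloc.h (including the GlobalPoolAllocator accessor already used by FreeGlobalPools), and exampleThreadEntry is a hypothetical name.

// Hypothetical usage sketch (not part of this commit).
void exampleThreadEntry()
{
    // InitializePoolIndex() must already have run once, process-wide.
    InitializeGlobalPools();    // create and push this thread's pool

    void* scratch = GlobalPoolAllocator.allocate(64);   // pool-backed bytes
    // ... use scratch; there is no per-allocation free ...

    FreeGlobalPools();          // popAll() and tear down this thread's pool
}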
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(bool g, int growthIncrement, int allocationAlignment) :
    global(g),
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}
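To make the alignment fix-up above concrete, here is a hypothetical standalone helper that mirrors the same arithmetic; it is an illustration only, not part of the commit.

// Hypothetical helper mirroring the constructor's alignment logic (not in the commit).
static size_t roundedAlignment(size_t requested)
{
    size_t minAlign = sizeof(void*);   // at least pointer alignment
    requested &= ~(minAlign - 1);      // truncate to a multiple of minAlign
    if (requested < minAlign)
        requested = minAlign;
    size_t a = 1;
    while (a < requested)              // round up to the next power of two
        a <<= 1;
    return a;                          // e.g. requested 24 -> 32 when sizeof(void*) == 8
}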
TPoolAllocator::~TPoolAllocator()
{
    if (!global) {
        //
        // Then we know that this object is not being
        // allocated after other, globally scoped objects
        // that depend on it. So we can delete the "in use" memory.
        //
        while (inUseList) {
            tHeader* next = inUseList->nextPage;
            inUseList->~tHeader();
            delete [] reinterpret_cast<char*>(inUseList);
            inUseList = next;
        }
    }

    //
    // Always delete the free list memory - it can't be being
    // (correctly) referenced, whether the pool allocator was
    // global or not. We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}
// Support MSVC++ 6.0
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

# ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
# else
    const size_t TAllocation::guardBlockSize = 0;
# endif

//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, char* locText) const
{
    for (int x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message. It's here just to be helpful.
            sprintf(assertMsg, "PoolAlloc: Damage %s %lu byte allocation at 0x%p\n",
                    locText, size, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
}
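As an illustration of what checkGuardBlock inspects (not code from the commit; the exact header layout and the 0xcd fill of fresh user data are handled by TAllocation in PoolAlloc.h, which is not shown here):

// Presumed layout verified when GUARD_BLOCKS is defined (illustration only):
//
//   [TAllocation header][guardBlockSize bytes of 0xfb][user data()][guardBlockSize bytes of 0xfe]
//
// Any byte in either guard region that no longer holds its fill value means
// the caller wrote outside its allocation, and the assert above fires.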
void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}
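A hypothetical scope built from push()/pop() (not part of the commit; exampleBatch is an invented name) shows the intended pattern: everything allocated between the two calls forms one batch that is reclaimed wholesale, with single pages recycled onto freeList.

// Hypothetical scoped-use sketch (not part of this commit).
void exampleBatch(TPoolAllocator& pool)
{
    pool.push();                 // mark the current page and offset

    for (int i = 0; i < 1000; ++i)
        pool.allocate(32);       // cheap bump-pointer allocations, never freed singly

    pool.pop();                  // reclaim the whole batch in one step
}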
void* TPoolAllocator::allocate(size_t numBytes)
{
    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);

    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (currentPageOffset + allocationSize <= pageSize) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize + headerSkip > pageSize) {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
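Because allocate() hands back raw, suitably aligned bytes, callers are expected to construct objects into them with placement new. A minimal sketch follows; it is not from the commit, and TExample and makeExample are hypothetical names.

#include <new>  // placement new (illustration only, not part of the commit)

struct TExample { int id; float weight; };    // hypothetical pool-allocated type

TExample* makeExample(TPoolAllocator& pool)
{
    void* mem = pool.allocate(sizeof(TExample));  // fast path: bump currentPageOffset
    return new(mem) TExample();                   // construct in pool storage
    // No matching delete: the storage is reclaimed by pop()/popAll(), so
    // pool-resident types should not rely on their destructors running.
}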
//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}