/*
* This program source code file is part of KiCad, a free EDA CAD application.
*
* Copyright (C) 2013-2016 CERN
* @author Maciej Suminski <maciej.suminski@cern.ch>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you may find one here:
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
* or you may search the http://www.gnu.org website for the version 2 license,
* or you may write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
/**
* @file cached_container.cpp
* @brief Class to store instances of VERTEX with caching. It allows storing VERTEX objects and
* associates them with VERTEX_ITEMs. This leads to a possibility of caching vertices data in the
* GPU memory and a fast reuse of that data.
*/
#include <gal/opengl/cached_container.h>
#include <gal/opengl/vertex_manager.h>
#include <gal/opengl/vertex_item.h>
#include <gal/opengl/shader.h>
#include <gal/opengl/utils.h>
#include <confirm.h>
#include <list>
#include <cassert>
#ifdef __WXDEBUG__
#include <wx/log.h>
#include <profile.h>
#endif /* __WXDEBUG__ */
using namespace KIGFX;
CACHED_CONTAINER::CACHED_CONTAINER( unsigned int aSize ) :
    VERTEX_CONTAINER( aSize ), m_item( NULL ), m_isMapped( false ),
    m_isInitialized( false ), m_glBufferHandle( -1 )
{
    // Make sure no member is left uninitialized
    m_chunkSize   = 0;
    m_chunkOffset = 0;
    m_itemSize    = 0;

    // At the very beginning the whole container is a single free chunk
    m_freeChunks.insert( CHUNK( aSize, 0 ) );
}
CACHED_CONTAINER::~CACHED_CONTAINER()
{
    // Release the mapping before destroying the buffer
    if( m_isMapped )
        Unmap();

    // Free the video memory only if it was ever allocated
    if( m_isInitialized )
        glDeleteBuffers( 1, &m_glBufferHandle );
}
void CACHED_CONTAINER::SetItem( VERTEX_ITEM* aItem )
{
    assert( aItem != NULL );

    // Make aItem the currently edited item and mirror its stored size/offset
    m_item      = aItem;
    m_itemSize  = m_item->GetSize();
    m_chunkSize = m_itemSize;

    if( m_itemSize != 0 )
        m_chunkOffset = m_item->GetOffset();    // item already stored - reuse its chunk
    else
        m_items.insert( m_item );               // the item was not stored before

#if CACHED_CONTAINER_TEST > 1
    wxLogDebug( wxT( "Adding/editing item 0x%08lx (size %d)" ), (long) m_item, m_itemSize );
#endif
}
void CACHED_CONTAINER::FinishItem()
{
2016-05-02 14:12:16 +00:00
assert( m_item != NULL );
assert( m_item->GetSize() == m_itemSize );
2013-09-12 07:44:57 +00:00
// Finishing the previously edited item
if( m_itemSize < m_chunkSize )
{
2013-09-12 07:44:57 +00:00
// There is some not used but reserved memory left, so we should return it to the pool
int itemOffset = m_item->GetOffset();
2013-09-12 07:44:57 +00:00
// Add the not used memory back to the pool
2016-05-02 14:12:16 +00:00
addFreeChunk( itemOffset + m_itemSize, m_chunkSize - m_itemSize );
2013-09-12 07:44:57 +00:00
// mergeFreeChunks(); // veery slow and buggy
}
2013-09-12 07:44:57 +00:00
#if CACHED_CONTAINER_TEST > 1
wxLogDebug( wxT( "Finishing item 0x%08lx (size %d)" ), (long) m_item, m_itemSize );
test();
m_item = NULL; // electric fence
2013-09-12 07:44:57 +00:00
#endif
}
VERTEX* CACHED_CONTAINER::Allocate( unsigned int aSize )
{
    assert( m_item != NULL );
    assert( m_isMapped );

    if( m_failed )
        return NULL;

    if( m_itemSize + aSize > m_chunkSize )
    {
        // The current chunk cannot hold the additional vertices - grow it.
        // Reserve twice the current item size plus the request, padded so the
        // chunk size is a multiple of 3 (vertices are stored as triangles).
        m_chunkSize = ( 2 * m_itemSize ) + aSize + ( 3 - aSize % 3 );
        m_chunkOffset = reallocate( m_chunkSize );

        // reallocate() returns UINT_MAX on failure, which is never a valid offset
        if( m_chunkOffset > m_currentSize )
        {
            m_failed = true;
            return NULL;
        }
    }

    // Hand out the slice right after the vertices already stored for this item
    VERTEX* result = &m_vertices[m_chunkOffset + m_itemSize];
    m_itemSize += aSize;

    // Now the item officially possesses the memory chunk
    m_item->setSize( m_itemSize );

    // The content has to be updated
    m_dirty = true;

#if CACHED_CONTAINER_TEST > 1
    test();
#endif

#if CACHED_CONTAINER_TEST > 2
    showFreeChunks();
    showUsedChunks();
#endif

    return result;
}
void CACHED_CONTAINER::Delete( VERTEX_ITEM* aItem )
{
    assert( aItem != NULL );
    assert( m_items.find( aItem ) != m_items.end() );

    int size   = aItem->GetSize();
    int offset = aItem->GetOffset();

#if CACHED_CONTAINER_TEST > 1
    wxLogDebug( wxT( "Removing 0x%08lx (size %d offset %d)" ), (long) aItem, size, offset );
#endif

    if( size > 0 )
    {
        // Return the item's storage to the free space pool
        addFreeChunk( offset, size );

        // Indicate that the item is not stored in the container anymore
        aItem->setSize( 0 );
    }

    m_items.erase( aItem );

#if CACHED_CONTAINER_TEST > 1
    test();
#endif

    // Dynamic shrinking would reduce memory usage, but freeing and reallocating
    // large chunks fragments memory and, after many free/reallocate cycles on the
    // same complex board, can leave no room to reallocate a large chunk.
    // Therefore it is currently disabled to avoid "out of memory" issues.
#if 0
    // Dynamic memory freeing, there is no point in holding
    // a large amount of memory when there is no use for it
    if( m_freeSpace > ( 0.75 * m_currentSize ) && m_currentSize > m_initialSize )
    {
        defragmentResize( 0.5 * m_currentSize );
    }
#endif
}
void CACHED_CONTAINER::Clear()
{
Map();
glInvalidateBufferData( GL_ARRAY_BUFFER );
Unmap();
m_freeSpace = m_currentSize;
m_failed = false;
// Set the size of all the stored VERTEX_ITEMs to 0, so it is clear that they are not held
// in the container anymore
for( ITEMS::iterator it = m_items.begin(); it != m_items.end(); ++it )
{
( *it )->setSize( 0 );
}
m_items.clear();
2013-07-17 16:49:38 +00:00
// Now there is only free space left
m_freeChunks.clear();
2013-10-14 18:40:36 +00:00
m_freeChunks.insert( CHUNK( m_freeSpace, 0 ) );
}
// Map the container's vertex buffer object into client memory, so m_vertices
// can be read and written directly. Must not be called when already mapped.
void CACHED_CONTAINER::Map()
{
    assert( !IsMapped() );

    // Lazily create the VBO on first use
    if( !m_isInitialized )
        init();

    glBindBuffer( GL_ARRAY_BUFFER, m_glBufferHandle );
    m_vertices = static_cast<VERTEX*>( glMapBuffer( GL_ARRAY_BUFFER, GL_READ_WRITE ) );
    // NOTE(review): glMapBuffer may return NULL on error; only checkGlError reports it
    checkGlError( "mapping vertices buffer" );
    m_isMapped = true;
}
// Unmap the vertex buffer object, invalidating m_vertices. After this call the
// GPU may use the buffer contents. Must only be called when currently mapped.
void CACHED_CONTAINER::Unmap()
{
    assert( IsMapped() );

    glUnmapBuffer( GL_ARRAY_BUFFER );
    checkGlError( "unmapping vertices buffer" );
    glBindBuffer( GL_ARRAY_BUFFER, 0 );
    m_vertices = NULL;  // the pointer is invalid once the buffer is unmapped
    checkGlError( "unbinding vertices buffer" );
    m_isMapped = false;
}
// One-time allocation of the GPU-side vertex buffer sized for m_currentSize
// vertices. Uses the GL_COPY_WRITE_BUFFER binding point so the GL_ARRAY_BUFFER
// binding is left untouched.
void CACHED_CONTAINER::init()
{
    glGenBuffers( 1, &m_glBufferHandle );
    glBindBuffer( GL_COPY_WRITE_BUFFER, m_glBufferHandle );
    glBufferData( GL_COPY_WRITE_BUFFER, m_currentSize * VertexSize, NULL, GL_DYNAMIC_DRAW );
    glBindBuffer( GL_COPY_WRITE_BUFFER, 0 );
    checkGlError( "allocating video memory for cached container" );

    m_isInitialized = true;
}
unsigned int CACHED_CONTAINER::reallocate( unsigned int aSize )
{
2016-05-02 14:12:16 +00:00
assert( aSize > 0 );
assert( m_isMapped );
2013-09-12 07:44:57 +00:00
#if CACHED_CONTAINER_TEST > 2
2013-09-12 07:44:57 +00:00
wxLogDebug( wxT( "Resize 0x%08lx from %d to %d" ), (long) m_item, m_itemSize, aSize );
#endif
// Is there enough space to store vertices?
if( m_freeSpace < aSize )
{
bool result;
// Would it be enough to double the current space?
if( aSize < m_freeSpace + m_currentSize )
{
// Yes: exponential growing
result = defragmentResize( m_currentSize * 2 );
}
else
{
// No: grow to the nearest greater power of 2
result = defragmentResize( pow( 2, ceil( log2( m_currentSize * 2 + aSize ) ) ) );
}
if( !result )
return UINT_MAX;
}
2013-09-12 07:44:57 +00:00
// Look for the free space chunk of at least given size
2013-10-14 18:40:36 +00:00
FREE_CHUNK_MAP::iterator newChunk = m_freeChunks.lower_bound( aSize );
2013-07-17 16:49:38 +00:00
if( newChunk == m_freeChunks.end() )
{
2013-07-17 16:49:38 +00:00
// In the case when there is enough space to store the vertices,
// but the free space is not continous we should defragment the container
if( !defragmentResize( m_currentSize ) )
return UINT_MAX;
2013-07-17 16:49:38 +00:00
// Update the current offset
m_chunkOffset = m_item->GetOffset();
// We can take the first free chunk, as there is only one after defragmentation
// and we can be sure that it provides enough space to store the object
2013-07-17 16:49:38 +00:00
newChunk = m_freeChunks.begin();
}
2016-05-02 14:12:16 +00:00
// Parameters of the allocated chunk
unsigned int newChunkSize = newChunk->first;
unsigned int newChunkOffset = newChunk->second;
2016-05-02 14:12:16 +00:00
assert( newChunkSize >= aSize );
assert( newChunkOffset < m_currentSize );
2013-07-17 16:49:38 +00:00
// Check if the item was previously stored in the container
if( m_itemSize > 0 )
{
#if CACHED_CONTAINER_TEST > 3
wxLogDebug( wxT( "Moving 0x%08x from 0x%08x to 0x%08x" ),
2016-05-02 14:12:16 +00:00
(int) m_item, oldChunkOffset, newChunkOffset );
#endif
2013-07-17 16:49:38 +00:00
// The item was reallocated, so we have to copy all the old data to the new place
2016-05-02 14:12:16 +00:00
memcpy( &m_vertices[newChunkOffset], &m_vertices[m_chunkOffset], m_itemSize * VertexSize );
2013-07-17 16:49:38 +00:00
// Free the space previously used by the chunk
2016-05-02 14:12:16 +00:00
assert( m_itemSize > 0 );
addFreeChunk( m_chunkOffset, m_itemSize );
2013-07-17 16:49:38 +00:00
}
2013-07-17 16:49:38 +00:00
// Remove the allocated chunk from the free space pool
m_freeChunks.erase( newChunk );
// If there is some space left, return it to the pool - add an entry for it
2016-05-02 14:12:16 +00:00
if( newChunkSize > aSize )
{
2016-05-02 14:12:16 +00:00
m_freeChunks.insert( CHUNK( newChunkSize - aSize, newChunkOffset + aSize ) );
}
m_freeSpace -= aSize;
2013-09-12 07:44:57 +00:00
// mergeFreeChunks(); // veery slow and buggy
m_item->setOffset( newChunkOffset );
return newChunkOffset;
}
void CACHED_CONTAINER::mergeFreeChunks()
{
if( m_freeChunks.size() <= 1 ) // There are no chunks that can be merged
2013-07-17 16:49:38 +00:00
return;
#if CACHED_CONTAINER_TEST > 0
prof_counter totalTime;
prof_start( &totalTime );
2013-09-12 07:44:57 +00:00
#endif
2013-07-17 16:49:38 +00:00
// Reversed free chunks map - this one stores chunk size with its offset as the key
2013-10-14 18:40:36 +00:00
std::list<CHUNK> freeChunks;
2013-10-14 18:40:36 +00:00
FREE_CHUNK_MAP::const_iterator it, it_end;
2013-07-17 16:49:38 +00:00
for( it = m_freeChunks.begin(), it_end = m_freeChunks.end(); it != it_end; ++it )
{
freeChunks.push_back( std::make_pair( it->second, it->first ) );
}
2013-07-17 16:49:38 +00:00
m_freeChunks.clear();
freeChunks.sort();
2013-10-14 18:40:36 +00:00
std::list<CHUNK>::const_iterator itf, itf_end;
2013-07-17 16:49:38 +00:00
unsigned int offset = freeChunks.front().first;
unsigned int size = freeChunks.front().second;
freeChunks.pop_front();
2013-07-17 16:49:38 +00:00
for( itf = freeChunks.begin(), itf_end = freeChunks.end(); itf != itf_end; ++itf )
{
if( itf->first == offset + size )
{
// These chunks can be merged, so just increase the current chunk size and go on
size += itf->second;
}
else
{
// These chunks cannot be merged
// So store the previous one
m_freeChunks.insert( std::make_pair( size, offset ) );
// and let's check the next chunk
offset = itf->first;
size = itf->second;
2013-07-17 16:49:38 +00:00
}
}
// Add the last one
m_freeChunks.insert( std::make_pair( size, offset ) );
#if CACHED_CONTAINER_TEST > 0
prof_end( &totalTime );
wxLogDebug( wxT( "Merged free chunks / %.1f ms" ), totalTime.msecs() );
2013-09-12 07:44:57 +00:00
#endif
2016-05-02 14:12:16 +00:00
#if CACHED_CONTAINER_TEST > 1
test();
2016-05-02 14:12:16 +00:00
#endif
}
// Resize the container to aNewSize vertices, defragmenting it in the process:
// a new GPU buffer is allocated and every stored item is copied into it back
// to back, so afterwards the free space forms a single contiguous chunk.
// Returns false if shrinking below the currently used space was requested.
bool CACHED_CONTAINER::defragmentResize( unsigned int aNewSize )
{
#if CACHED_CONTAINER_TEST > 0
    wxLogDebug( wxT( "Resizing container from %d to %d" ), m_currentSize, aNewSize );
#endif

    // No shrinking if we cannot fit all the data
    if( aNewSize < m_currentSize && usedSpace() > aNewSize )
        return false;

#if CACHED_CONTAINER_TEST > 0
    wxLogDebug( wxT( "Defragmenting" ) );

    prof_counter totalTime;
    prof_start( &totalTime );
#endif

    GLuint newBuffer;

    // The buffer has to be unmapped before it can serve as a GPU copy source
    Unmap();
    glGenBuffers( 1, &newBuffer );
    glBindBuffer( GL_COPY_WRITE_BUFFER, newBuffer );
    glBufferData( GL_COPY_WRITE_BUFFER, aNewSize * VertexSize, NULL, GL_DYNAMIC_DRAW );
    glBindBuffer( GL_COPY_READ_BUFFER, m_glBufferHandle );
    checkGlError( "resizing vertex buffer" );

    // Copy each item into the new buffer, packing them contiguously from offset 0
    int newOffset = 0;
    ITEMS::iterator it, it_end;

    for( it = m_items.begin(), it_end = m_items.end(); it != it_end; ++it )
    {
        VERTEX_ITEM* item = *it;
        int itemOffset = item->GetOffset();
        int itemSize = item->GetSize();

        // Move an item to the new container (GPU-side copy, no round trip to RAM)
        glCopyBufferSubData( GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
        itemOffset * VertexSize, newOffset * VertexSize, itemSize * VertexSize );

        // Update new offset
        item->setOffset( newOffset );

        // Move to the next free space
        newOffset += itemSize;
    }

    // Drop the old buffer and switch to the new one
    glBindBuffer( GL_COPY_WRITE_BUFFER, 0 );
    glBindBuffer( GL_COPY_READ_BUFFER, 0 );
    glDeleteBuffers( 1, &m_glBufferHandle );
    m_glBufferHandle = newBuffer;
    checkGlError( "switching buffers during defragmentation" );
    Map();

#if CACHED_CONTAINER_TEST > 0
    prof_end( &totalTime );

    wxLogDebug( wxT( "Defragmented the container storing %d vertices / %.1f ms" ),
    m_currentSize - m_freeSpace, totalTime.msecs() );
#endif

    m_freeSpace += ( aNewSize - m_currentSize );
    m_currentSize = aNewSize;

    // Now there is only one big chunk of free memory
    m_freeChunks.clear();
    m_freeChunks.insert( CHUNK( m_freeSpace, m_currentSize - m_freeSpace ) );
    return true;
}
void CACHED_CONTAINER::addFreeChunk( unsigned int aOffset, unsigned int aSize )
{
    // Only non-empty chunks lying entirely inside the container are valid
    assert( aSize > 0 );
    assert( aOffset + aSize <= m_currentSize );

    m_freeChunks.insert( CHUNK( aSize, aOffset ) );
    m_freeSpace += aSize;
}
void CACHED_CONTAINER::showFreeChunks()
{
FREE_CHUNK_MAP::iterator it;
wxLogDebug( wxT( "Free chunks:" ) );
for( it = m_freeChunks.begin(); it != m_freeChunks.end(); ++it )
{
unsigned int offset = getChunkOffset( *it );
unsigned int size = getChunkSize( *it );
2016-05-02 14:12:16 +00:00
assert( size > 0 );
wxLogDebug( wxT( "[0x%08x-0x%08x] (size %d)" ),
offset, offset + size - 1, size );
}
}
void CACHED_CONTAINER::showUsedChunks()
{
ITEMS::iterator it;
2016-05-02 14:12:16 +00:00
wxLogDebug( wxT( "Used chunks:" ) );
for( it = m_items.begin(); it != m_items.end(); ++it )
{
VERTEX_ITEM* item = *it;
unsigned int offset = item->GetOffset();
unsigned int size = item->GetSize();
2016-05-02 14:12:16 +00:00
assert( size > 0 );
2013-09-12 07:44:57 +00:00
wxLogDebug( wxT( "[0x%08x-0x%08x] @ 0x%08lx (size %d)" ),
offset, offset + size - 1, (long) item, size );
}
}
void CACHED_CONTAINER::test()
{
// Free space check
unsigned int freeSpace = 0;
FREE_CHUNK_MAP::iterator itf;
for( itf = m_freeChunks.begin(); itf != m_freeChunks.end(); ++itf )
freeSpace += getChunkSize( *itf );
2013-07-17 16:49:38 +00:00
2016-05-02 14:12:16 +00:00
assert( freeSpace == m_freeSpace );
2016-05-02 14:12:16 +00:00
// Used space check
/*unsigned int usedSpace = 0;
ITEMS::iterator itr;
for( itr = m_items.begin(); itr != m_items.end(); ++itr )
2016-05-02 14:12:16 +00:00
usedSpace += ( *itr )->GetSize();
usedSpace += m_itemSize; // Add the current chunk size
2016-05-02 14:12:16 +00:00
assert( ( freeSpace + usedSpace ) == m_currentSize );*/
// Overlapping check TBD
2013-07-17 16:49:38 +00:00
}