• R/O
  • SSH
  • HTTPS

eirrepo: Commit


Commit MetaInfo

Revision: 117 (tree)
Time: 2018-09-06 20:41:32
Author: quiret

Log Message

- refactored code
- fixed a bug where the free-bytes region of growing memory islands was not updated
- added & improved some unit tests

Change Summary

Incremental Difference

--- common/sdk/OSUtils.memheap.h (revision 116)
+++ common/sdk/OSUtils.memheap.h (revision 117)
@@ -233,61 +233,11 @@
233233 // We can only fail if the allocation does not fit with regards to the remaining free space.
234234 // Or the required data size is zero (makes no sense!)
235235
236- if ( newSize == 0 )
237- return false;
238-
239236 VMemAllocation *memHandle = get_mem_block_from_ptr( memPtr );
240237
241- // We do not have to update anything, so bail.
242- size_t oldDataSize = memHandle->dataSize;
238+ VMemIsland *manager = memHandle->manager;
243239
244- if ( oldDataSize == newSize )
245- return true;
246-
247- // Since we know the free space after the memory handle, we can simply grow or shrink without issue.
248- // The operation takes logarithmic time though, because we update the AVL tree.
249-
250- size_t startOfDataOffset = (size_t)memPtr;
251-
252- size_t newRequestedStartOfFreeBytes = ( startOfDataOffset + newSize );
253-
254- // Get the offset to the byte that is last of the available (possible) free space.
255- size_t endOfFreeSpaceOffset;
256-
257- bool wasFreeSpaceEmpty = memHandle->freeSpaceAfterThis.freeRegion.IsEmpty();
258-
259- if ( wasFreeSpaceEmpty == false )
260- {
261- endOfFreeSpaceOffset = memHandle->freeSpaceAfterThis.freeRegion.GetSliceEndPoint();
262- }
263- else
264- {
265- endOfFreeSpaceOffset = memHandle->GetRegion().GetSliceEndPoint();
266- }
267-
268- // If this is not a valid offset for the free bytes, we bail.
269- // We add 1 because it could become empty aswell.
270- if ( endOfFreeSpaceOffset + 1 < newRequestedStartOfFreeBytes )
271- return false;
272-
273- // Update the meta-data.
274- VMemIsland *memIsland = memHandle->manager;
275-
276- if ( wasFreeSpaceEmpty == false )
277- {
278- memIsland->avlSortedBySize.RemoveByNodeFast( &memHandle->freeSpaceAfterThis.sortedBySizeNode );
279- }
280-
281- memHandle->freeSpaceAfterThis.freeRegion = memBlockSlice_t::fromOffsets( newRequestedStartOfFreeBytes, endOfFreeSpaceOffset );
282- memHandle->dataSize = newSize;
283-
284- // Insert the new thing again.
285- if ( memHandle->freeSpaceAfterThis.freeRegion.IsEmpty() == false )
286- {
287- memIsland->avlSortedBySize.Insert( &memHandle->freeSpaceAfterThis.sortedBySizeNode );
288- }
289-
290- return true;
240+ return manager->ResizeAllocation( this, memHandle, memPtr, newSize );
291241 }
292242
293243 // Returns the data size of an allocation.
@@ -307,6 +257,35 @@
307257 return ( memHandle->dataOff + memHandle->dataSize );
308258 }
309259
260+ // Returns statistics about this memory allocator.
261+ struct heapStats
262+ {
263+ size_t usedMetaBytes = 0;
264+ size_t freeBytes = 0;
265+ size_t countOfAllocations = 0;
266+ size_t countOfIslands = 0;
267+ };
268+
269+ inline heapStats GetStatistics( void ) const
270+ {
271+ heapStats stats;
272+
273+ LIST_FOREACH_BEGIN( VMemIsland, this->listIslands.root, managerNode )
274+
275+ VMemIsland::usageStats islandStats = item->GetUsageStatistics();
276+
277+ stats.usedMetaBytes += islandStats.usedMetaBytes;
278+ stats.freeBytes += islandStats.freeBytes;
279+ stats.countOfAllocations += islandStats.countOfAllocations;
280+
281+ // One more island.
282+ stats.countOfIslands++;
283+
284+ LIST_FOREACH_END
285+
286+ return stats;
287+ }
288+
310289 // Simple realloc helper just because it is being exposed in the CRT aswell.
311290 inline void* Realloc( void *memPtr, size_t newSize, size_t alignment = sizeof(void*) )
312291 {
@@ -387,11 +366,6 @@
387366 struct VMemFreeBlock
388367 {
389368 inline VMemFreeBlock( void ) = default;
390- inline VMemFreeBlock( size_t totalSize )
391- : freeRegion( 0, totalSize )
392- {
393- return;
394- }
395369 inline VMemFreeBlock( memBlockSlice_t slice )
396370 : freeRegion( std::move( slice ) )
397371 {
@@ -660,6 +634,9 @@
660634
661635 if ( growSuccess )
662636 {
637+ // Grow the available free space.
638+ lastFreeBlock->freeRegion.SetSliceEndPoint( finalMemEndOffset );
639+
663640 // Just return the new spot.
664641 // We will insert to the end node.
665642 allocPtr = (void*)tryNewMemOffset;
@@ -681,6 +658,7 @@
681658
682659 // Subtract our allocation from the free region we have found and newly manage the things.
683660 bool hadSomethingStartFromLeft = false;
661+ bool hadFreeSpaceAfterNewAlloc = false;
684662
685663 // Update the pointery things.
686664 LIST_INSERT( freeBlockToAllocateInto->sortedByAddrNode, newAlloc->freeSpaceAfterThis.sortedByAddrNode );
@@ -706,6 +684,10 @@
706684 }
707685 else
708686 {
687+ // It is important that we keep the pointers inside of free region intact,
688+ // so even if it is empty we know where it is supposed to start.
689+ hadFreeSpaceAfterNewAlloc = true;
690+
709691 // This has to be the memory that is available just after our allocation.
710692 newAlloc->freeSpaceAfterThis.freeRegion = slicedRegion;
711693 avlSortedBySize.Insert( &newAlloc->freeSpaceAfterThis.sortedBySizeNode );
@@ -718,6 +700,12 @@
718700 freeBlockToAllocateInto->freeRegion.collapse();
719701 }
720702
703+ if ( !hadFreeSpaceAfterNewAlloc )
704+ {
705+ // Make proper empty space.
706+ newAlloc->freeSpaceAfterThis.freeRegion = memBlockSlice_t( allocSlice.GetSliceEndPoint() + 1, 0 );
707+ }
708+
721709 return newAlloc;
722710 }
723711
@@ -727,17 +715,8 @@
727715
728716 // We simply release out the memory that we are asked to free.
729717 {
730- size_t newFreeEndOffset;
718+ size_t newFreeEndOffset = allocObj->freeSpaceAfterThis.freeRegion.GetSliceEndPoint();
731719
732- if ( allocObj->freeSpaceAfterThis.freeRegion.IsEmpty() == false )
733- {
734- newFreeEndOffset = allocObj->freeSpaceAfterThis.freeRegion.GetSliceEndPoint();
735- }
736- else
737- {
738- newFreeEndOffset = allocObj->GetRegion().GetSliceEndPoint();
739- }
740-
741720 RwListEntry <VMemFreeBlock> *nodePrevFreeBlock = allocObj->freeSpaceAfterThis.sortedByAddrNode.prev;
742721
743722 // Has to be because there is a first free block, always.
@@ -797,6 +776,52 @@
797776 }
798777 }
799778
779+ inline bool ResizeAllocation( NativeHeapAllocator *manager, VMemAllocation *memHandle, void *memPtr, size_t newSize )
780+ {
781+ if ( newSize == 0 )
782+ return false;
783+
784+ // We do not have to update anything, so bail.
785+ size_t oldDataSize = memHandle->dataSize;
786+
787+ if ( oldDataSize == newSize )
788+ return true;
789+
790+ // Since we know the free space after the memory handle, we can simply grow or shrink without issue.
791+ // The operation takes logarithmic time though, because we update the AVL tree.
792+
793+ size_t startOfDataOffset = (size_t)memPtr;
794+
795+ size_t newRequestedStartOfFreeBytes = ( startOfDataOffset + newSize );
796+
797+ // Get the offset to the byte that is last of the available (possible) free space.
798+ size_t endOfFreeSpaceOffset = memHandle->freeSpaceAfterThis.freeRegion.GetSliceEndPoint();
799+
800+ // If this is not a valid offset for the free bytes, we bail.
801+ // We add 1 because it could become empty aswell.
802+ if ( endOfFreeSpaceOffset + 1 < newRequestedStartOfFreeBytes )
803+ return false;
804+
805+ // Update the meta-data.
806+ VMemIsland *memIsland = memHandle->manager;
807+
808+ if ( memHandle->freeSpaceAfterThis.freeRegion.IsEmpty() == false )
809+ {
810+ memIsland->avlSortedBySize.RemoveByNodeFast( &memHandle->freeSpaceAfterThis.sortedBySizeNode );
811+ }
812+
813+ memHandle->freeSpaceAfterThis.freeRegion = memBlockSlice_t::fromOffsets( newRequestedStartOfFreeBytes, endOfFreeSpaceOffset );
814+ memHandle->dataSize = newSize;
815+
816+ // Insert the new thing again.
817+ if ( memHandle->freeSpaceAfterThis.freeRegion.IsEmpty() == false )
818+ {
819+ memIsland->avlSortedBySize.Insert( &memHandle->freeSpaceAfterThis.sortedBySizeNode );
820+ }
821+
822+ return true;
823+ }
824+
800825 inline bool HasNoAllocations( void ) const
801826 {
802827 // If there is just the first free space block, then there cannot be any allocation either.
@@ -803,6 +828,49 @@
803828 return ( this->firstFreeSpaceBlock.sortedByAddrNode.next == &this->sortedByAddrFreeBlocks.root );
804829 }
805830
831+ // Returns statistics about usage of this memory island.
832+ struct usageStats
833+ {
834+ size_t usedMetaBytes = 0;
835+ size_t freeBytes = 0;
836+ size_t countOfAllocations = 0;
837+ };
838+
839+ inline usageStats GetUsageStatistics( void ) const
840+ {
841+ usageStats stats;
842+
843+ bool hasStartOfUsedBytesPos = false;
844+ size_t startOfUsedBytesPos;
845+
846+ LIST_FOREACH_BEGIN( VMemFreeBlock, this->sortedByAddrFreeBlocks.root, sortedByAddrNode )
847+
848+ memBlockSlice_t freeRegion = item->freeRegion;
849+
850+ if ( hasStartOfUsedBytesPos )
851+ {
852+ size_t endUsedBytesOffset = freeRegion.GetSliceStartPoint();
853+
854+ stats.usedMetaBytes += ( endUsedBytesOffset - startOfUsedBytesPos );
855+
856+ // One more allocation.
857+ stats.countOfAllocations++;
858+ }
859+ else
860+ {
861+ startOfUsedBytesPos = ( freeRegion.GetSliceEndPoint() + 1 );
862+
863+ hasStartOfUsedBytesPos = true;
864+ }
865+
866+ // Count the free bytes aswell.
867+ stats.freeBytes += freeRegion.GetSliceSize();
868+
869+ LIST_FOREACH_END
870+
871+ return stats;
872+ }
873+
806874 RwListEntry <VMemIsland> managerNode;
807875
808876 NativePageAllocator::pageHandle *allocHandle; // handle into the NativePageAllocator for meta-info
--- common/sdk/Vector.h (revision 116)
+++ common/sdk/Vector.h (revision 117)
@@ -156,22 +156,35 @@
156156 size_t old_count = this->data_count;
157157 structType *old_data = this->data_entries;
158158
159+ size_t moved_idx = 0;
160+
159161 try
160162 {
161- for ( size_t n = 0; n < new_item_count; n++ )
163+ while ( moved_idx < new_item_count )
162164 {
163- const structType *old_entry = nullptr;
165+ // This is not supposed to be constant because we can move-away from the original.
166+ structType *old_entry = nullptr;
164167
165- if ( n < old_count )
168+ if ( moved_idx < old_count )
166169 {
167- old_entry = ( old_data + n );
170+ old_entry = ( old_data + moved_idx );
168171 }
169172
170- cb( (structType*)new_data_ptr + n, old_entry, n );
173+ cb( (structType*)new_data_ptr + moved_idx, old_entry, moved_idx );
174+
175+ moved_idx++;
171176 }
172177 }
173178 catch( ... )
174179 {
180+ // Move-back the already moved items.
181+ while ( moved_idx > 0 )
182+ {
183+ moved_idx--;
184+
185+
186+ }
187+
175188 // We do not add the item to our array, so clean-up.
176189 allocatorType::Free( this, new_data_ptr );
177190
@@ -196,41 +209,33 @@
196209 size_t newCount = ( oldCount + 1 );
197210 size_t newRequiredSize = newCount * sizeof(structType);
198211
199- structType *use_data = this->data_entries;
200-
201- if ( use_data )
212+ if ( structType *use_data = this->data_entries )
202213 {
203214 bool gotToResize = allocatorType::Resize( this, use_data, newRequiredSize );
204215
205- if ( !gotToResize )
216+ if ( gotToResize )
206217 {
207- recast_memory( newCount, newRequiredSize,
208- [&]( void *memPtr, const structType *old_item, size_t idx )
209- {
210- if ( idx == oldCount )
211- {
212- new (memPtr) structType( item );
213- }
214- else
215- {
216- new (memPtr) structType( *old_item );
217- }
218- });
218+ // We just have to add at back.
219+ new ( use_data + oldCount ) structType( item );
219220
221+ this->data_count = newCount;
222+
220223 return;
221224 }
222225 }
223- else
226+
227+ recast_memory( newCount, newRequiredSize,
228+ [&]( void *memPtr, const structType *old_item, size_t idx )
224229 {
225- use_data = (structType*)allocatorType::Allocate( this, newRequiredSize, alignof(structType) );
226-
227- this->data_entries = use_data;
228- }
229-
230- // We just have to add at back.
231- new ( use_data + oldCount ) structType( item );
232-
233- this->data_count = newCount;
230+ if ( idx == oldCount )
231+ {
232+ new (memPtr) structType( item );
233+ }
234+ else
235+ {
236+ new (memPtr) structType( *old_item );
237+ }
238+ });
234239 }
235240
236241 inline void RemoveFromBack( void )
--- unittests/src/heaptests.cpp (revision 116)
+++ unittests/src/heaptests.cpp (revision 117)
@@ -155,4 +155,30 @@
155155 assert( shrinkedMemBlock == newMemBlock );
156156 }
157157 printf( "ok.\n" );
158+
159+ printf( "testing native heap allocator move..." );
160+ {
161+ NativeHeapAllocator heap1, heap2;
162+
163+ void *oneMem = heap1.Allocate( 123 );
164+ void *twoMem = heap1.Allocate( 456 );
165+
166+ assert( oneMem != nullptr );
167+ assert( twoMem != nullptr );
168+
169+ NativeHeapAllocator::heapStats statsOfHeap1 = heap1.GetStatistics();
170+
171+ assert( statsOfHeap1.usedMetaBytes >= 579 );
172+
173+ heap2 = std::move( heap1 );
174+
175+ assert( heap1.GetStatistics().usedMetaBytes == 0 );
176+ assert( heap2.GetStatistics().usedMetaBytes == statsOfHeap1.usedMetaBytes );
177+
178+ heap2.Free( oneMem );
179+ heap2.Free( twoMem );
180+
181+ assert( heap2.GetStatistics().usedMetaBytes == 0 );
182+ }
183+ printf( "ok.\n" );
158184 }
\ No newline at end of file
--- unittests/src/memtests.cpp (revision 116)
+++ unittests/src/memtests.cpp (revision 117)
@@ -48,18 +48,34 @@
4848 // Test placed memory allocation.
4949 printf( "testing native vmem handle placed memory allocation..." );
5050 {
51- NativePageAllocator::pageHandle *one = nativeMem.Allocate( (void*)0x60000000, 0x1000 );
51+ bool isGood = false;
52+ {
53+ NativePageAllocator::pageHandle *one = nativeMem.Allocate( (void*)0x60000000, 0x1000 );
5254
53- assert( one != nullptr );
55+ if ( one != nullptr )
56+ {
57+ NativePageAllocator::pageHandle *two = nativeMem.Allocate( (void*)0x60001000, 0x1000 );
5458
55- NativePageAllocator::pageHandle *two = nativeMem.Allocate( (void*)0x60001000, 0x1000 );
59+ if ( two != nullptr )
60+ {
61+ isGood = true;
5662
57- assert( two != nullptr );
63+ nativeMem.Free( one );
64+ }
5865
59- nativeMem.Free( two );
60- nativeMem.Free( one );
66+ nativeMem.Free( two );
67+ }
68+ }
69+
70+ if ( isGood )
71+ {
72+ printf( "ok.\n" );
73+ }
74+ else
75+ {
76+ printf( "soft-fail.\n" );
77+ }
6178 }
62- printf( "ok.\n" );
6379
6480 // Test freeing memory by address.
6581 printf( "testing native vmem handle free-by-addr..." );
@@ -191,6 +207,20 @@
191207 }
192208 printf( "ok.\n" );
193209
210+ printf( "testing native vmem allocator move..." );
211+ {
212+ NativePageAllocator nativeMem1, nativeMem2;
213+
214+ NativePageAllocator::pageHandle *handle = nativeMem1.Allocate( nullptr, nativeMem1.GetPageSize() );
215+
216+ assert( handle != nullptr );
217+
218+ nativeMem2 = std::move( nativeMem1 );
219+
220+ nativeMem2.Free( handle );
221+ }
222+ printf( "ok.\n" );
223+
194224 // TODO: add test cases where we check the caching functionality;
195225 // * cached alloc across arenas
196226 // * preferring cached reserved memory over new memory
Show on old repository browser