
eirrepo: Commit


Commit MetaInfo

Revision: 113 (tree)
Time: 2018-08-30 02:03:46
Author: quiret

Log Message

- Improved memory allocation for the very rare scenario where unusual memory alignments are in use: a subsequent allocation could try the misaligned same-size free block first, fail to fit, and then never consider the other possibly suitable free blocks of the same size. The allocator now checks all same-size free blocks, with a converging-to-zero penalty.
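
To make the failure mode concrete, here is a minimal standalone C++ sketch. The helper names AlignUp and FitsAligned and the concrete offsets are illustrative assumptions, not code from the repository; the point is only that a free block can be big enough by raw size yet still reject a request once its start offset is rounded up to the requested alignment, so trying just one same-size free block can fail where a same-size sibling would succeed.

    #include <cassert>
    #include <cstddef>

    // Hypothetical helper: round an offset up to the next multiple of "alignment".
    static std::size_t AlignUp( std::size_t offset, std::size_t alignment )
    {
        return ( offset + alignment - 1 ) / alignment * alignment;
    }

    // Hypothetical check: does a request of "size" bytes at "alignment" fit into a
    // free region starting at "blockOff" and spanning "blockSize" bytes?
    static bool FitsAligned( std::size_t blockOff, std::size_t blockSize, std::size_t size, std::size_t alignment )
    {
        std::size_t allocOff = AlignUp( blockOff, alignment );
        return ( allocOff + size <= blockOff + blockSize );
    }

    int main( void )
    {
        // Two free blocks of identical size (64 bytes) but different start offsets.
        // A 64-byte request with 16-byte alignment does not fit the misaligned block
        // (aligned start 0x1010 plus 64 overshoots the block end 0x1048) ...
        assert( FitsAligned( 0x1008, 64, 64, 16 ) == false );
        // ... but fits the same-size block that happens to start 16-byte aligned.
        assert( FitsAligned( 0x2000, 64, 64, 16 ) == true );
        return 0;
    }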

Change Summary

- common/sdk/AVLTree.h
- common/sdk/OSUtils.memheap.h

Incremental Difference

--- common/sdk/AVLTree.h (revision 112)
+++ common/sdk/AVLTree.h (revision 113)
@@ -667,24 +667,80 @@
         return true;
     }
 
+    struct nodestack_iterator
+    {
+        AINLINE nodestack_iterator( AVLNode *walkNode )
+        {
+            this->curNode = walkNode;
+            this->nodestack = nullptr;
+            this->doesIterateNodestack = false;
+        }
+        AINLINE nodestack_iterator( const nodestack_iterator& ) = default;
+        AINLINE ~nodestack_iterator( void ) = default;
+
+        AINLINE nodestack_iterator& operator = ( const nodestack_iterator& ) = default;
+
+        AINLINE bool IsEnd( void ) const
+        {
+            if ( this->doesIterateNodestack )
+            {
+                return ( this->curNode == this->nodestack );
+            }
+            else
+            {
+                return ( this->curNode == nullptr );
+            }
+        }
+
+        AINLINE void Increment( void )
+        {
+            AVLNode *curNode = this->curNode;
+
+            assert( curNode != nullptr );
+
+            if ( this->doesIterateNodestack )
+            {
+                curNode = curNode->next;
+            }
+            else
+            {
+                AVLNode *nodestack = curNode->owned_nodestack;
+
+                if ( nodestack != nullptr )
+                {
+                    this->curNode = nodestack;
+                    this->nodestack = nodestack;
+                    this->doesIterateNodestack = true;
+                }
+                else
+                {
+                    this->curNode = nullptr;
+                }
+            }
+        }
+
+        AINLINE AVLNode* Resolve( void )
+        {
+            return this->curNode;
+        }
+
+        AVLNode *curNode;
+        AVLNode *nodestack;
+        bool doesIterateNodestack;
+    };
+
     // Calls the function for each node-stack member of the node.
     // Should be used if you want to iterate over absolutely every node of a tree.
     template <typename callbackType>
     static AINLINE void call_for_each_node_for_nodestack( AVLNode *walkNode, const callbackType& cb )
     {
-        cb( walkNode );
+        nodestack_iterator iter( walkNode );
 
-        if ( AVLNode *nodestack = walkNode->owned_nodestack )
+        while ( !iter.IsEnd() )
         {
-            AVLNode *curnode = nodestack;
+            cb( iter.Resolve() );
 
-            do
-            {
-                cb( curnode );
-
-                curnode = curnode->next;
-            }
-            while ( curnode != nodestack );
+            iter.Increment();
         }
     }
 
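
For reference, the traversal contract that nodestack_iterator packages up is the one visible in the removed do/while loop above: visit the tree node itself, then every member of its circular owned_nodestack list of same-key duplicates, stopping once the walk arrives back at the list head. Below is a self-contained sketch of that pattern, using a deliberately simplified stand-in Node type rather than the real AVLNode:

    #include <cassert>
    #include <cstdio>

    // Simplified stand-in for AVLNode; the real type carries the AVL bookkeeping too.
    struct Node
    {
        int value;
        Node *owned_nodestack;  // first duplicate-key node, or nullptr if there are none
        Node *next;             // circular link among the duplicates
    };

    // Visit a tree node and every member of its circular nodestack, mirroring the
    // removed do/while loop that nodestack_iterator now encapsulates.
    template <typename callbackType>
    static void for_each_in_nodestack( Node *walkNode, const callbackType& cb )
    {
        cb( walkNode );

        if ( Node *nodestack = walkNode->owned_nodestack )
        {
            Node *curnode = nodestack;

            do
            {
                cb( curnode );

                curnode = curnode->next;    // wrapping back to the head ends the loop
            }
            while ( curnode != nodestack );
        }
    }

    int main( void )
    {
        // A tree node owning two same-key duplicates linked into a circular list.
        Node dup2 = { 7, nullptr, nullptr };
        Node dup1 = { 7, nullptr, &dup2 };
        dup2.next = &dup1;
        Node treeNode = { 7, &dup1, nullptr };

        int visited = 0;
        for_each_in_nodestack( &treeNode, [&]( Node *n ) { (void)n; ++visited; } );
        assert( visited == 3 );    // the tree node plus both duplicates
        std::printf( "visited %d nodes\n", visited );
        return 0;
    }

The committed iterator exposes this same two-phase walk behind IsEnd/Resolve/Increment, so call sites such as the allocator change below can drive it with an ordinary while loop.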
--- common/sdk/OSUtils.memheap.h (revision 112)
+++ common/sdk/OSUtils.memheap.h (revision 113)
@@ -574,32 +574,40 @@
 
         while ( !iter.IsEnd() )
         {
-            // Please note that by not checking the nodestack members we just try any region of same size
-            // for allocation. It does not matter of course which one we pick.
-            // For funny business we could randomize at this point though.
+            // We have to check each member of the nodestack of the current best-fit node because allocation could
+            // fail due to misalignment. But since we have the best-fit node going for good alignment usage
+            // is something the user wants: do not worry!
 
-            VMemFreeBlock *smallFreeBlock = AVL_GETITEM( VMemFreeBlock, iter.Resolve(), sortedBySizeNode );
+            VMemAllocAVLTree::nodestack_iterator nodestack_iter( iter.Resolve() );
 
-            // Try to allocate into it.
-            // It succeeds if the allocation does fit into the free region.
-            size_t reqSize;
-            size_t allocOff = smallFreeBlock->freeRegion.GetSliceStartPoint();
+            while ( !nodestack_iter.IsEnd() )
+            {
+                VMemFreeBlock *smallFreeBlock = AVL_GETITEM( VMemFreeBlock, nodestack_iter.Resolve(), sortedBySizeNode );
 
-            posDispatch.ScanNextBlock( allocOff, reqSize );
+                // Try to allocate into it.
+                // It succeeds if the allocation does fit into the free region.
+                size_t reqSize;
+                size_t allocOff = smallFreeBlock->freeRegion.GetSliceStartPoint();
 
-            memBlockSlice_t requiredMemRegion( allocOff, reqSize );
+                posDispatch.ScanNextBlock( allocOff, reqSize );
 
-            memBlockSlice_t::eIntersectionResult intResult = requiredMemRegion.intersectWith( smallFreeBlock->freeRegion );
+                memBlockSlice_t requiredMemRegion( allocOff, reqSize );
 
-            if ( intResult == memBlockSlice_t::INTERSECT_INSIDE ||
-                 intResult == memBlockSlice_t::INTERSECT_EQUAL )
-            {
-                // We found a valid allocation slot!
-                // So return it.
-                allocPtr = (void*)allocOff;
-                allocSlice = requiredMemRegion;
-                freeBlockToAllocateInto = smallFreeBlock;
-                goto foundAllocationSpot;
+                memBlockSlice_t::eIntersectionResult intResult = requiredMemRegion.intersectWith( smallFreeBlock->freeRegion );
+
+                if ( intResult == memBlockSlice_t::INTERSECT_INSIDE ||
+                     intResult == memBlockSlice_t::INTERSECT_EQUAL )
+                {
+                    // We found a valid allocation slot!
+                    // So return it.
+                    allocPtr = (void*)allocOff;
+                    allocSlice = requiredMemRegion;
+                    freeBlockToAllocateInto = smallFreeBlock;
+                    goto foundAllocationSpot;
+                }
+
+                // Try the next same-size freeblock.
+                nodestack_iter.Increment();
             }
 
             // Try the next bigger block.
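
Taken together, the OSUtils.memheap.h change turns the search into two levels: the outer loop still walks free blocks ordered by size from the best fit upward, and the new inner loop walks every same-size free block in the current node's nodestack before falling back to the next bigger size. The following is a simplified sketch of that strategy under assumed types: a std::map of size buckets stands in for the real VMemFreeBlock AVL tree, and a plain AlignUp fit check stands in for posDispatch/memBlockSlice_t.

    #include <cstddef>
    #include <map>
    #include <optional>
    #include <vector>

    // Hypothetical simplified free-block record; the real code links VMemFreeBlock
    // nodes into an AVL tree sorted by region size.
    struct FreeBlock
    {
        std::size_t offset;
        std::size_t size;
    };

    static std::size_t AlignUp( std::size_t off, std::size_t alignment )
    {
        return ( off + alignment - 1 ) / alignment * alignment;
    }

    // Best-fit search in the spirit of revision 113: walk the size buckets from the
    // smallest size that could fit upward, and inside each bucket try every
    // same-size free block (the "nodestack"), because one of them may satisfy the
    // alignment even if the first one does not.
    static std::optional <std::size_t> FindAllocationOffset(
        const std::map <std::size_t, std::vector <FreeBlock>>& blocksBySize,
        std::size_t size, std::size_t alignment )
    {
        for ( auto it = blocksBySize.lower_bound( size ); it != blocksBySize.end(); ++it )
        {
            for ( const FreeBlock& block : it->second )
            {
                std::size_t allocOff = AlignUp( block.offset, alignment );

                if ( allocOff + size <= block.offset + block.size )
                {
                    return allocOff;    // found a valid slot inside this free region
                }
                // Otherwise try the next same-size free block.
            }
            // No same-size block worked: try the next bigger size.
        }
        return std::nullopt;
    }

    int main( void )
    {
        std::map <std::size_t, std::vector <FreeBlock>> blocksBySize;
        blocksBySize[ 64 ] = { { 0x1008, 64 }, { 0x2000, 64 } };    // same size, different alignment

        // The misaligned block at 0x1008 is rejected, its same-size sibling at 0x2000 is accepted.
        std::optional <std::size_t> off = FindAllocationOffset( blocksBySize, 64, 16 );
        return ( off && *off == 0x2000 ) ? 0 : 1;
    }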