@@ -46,7 +46,11 @@ using namespace CorUnix;
4646SET_DEFAULT_DEBUG_CHANNEL (VIRTUAL);
4747
4848CRITICAL_SECTION virtual_critsec PAL_GLOBAL;
49- CRITICAL_SECTION virtual_realloc PAL_GLOBAL;
49+
#ifdef DEBUG
// Debug-only alignment statistics for VirtualAlloc's 64K-alignment logic:
// attempt1 counts reservations that came back 64K-aligned on the first try;
// attempt2 counts reservations that needed the over-reserve-and-trim fallback.
// Updated with InterlockedIncrement, so plain ints wrapped in Volatile<>.
Volatile<int> attempt1 = 0;
Volatile<int> attempt2 = 0;
#endif
5054
5155#if MMAP_IGNORES_HINT
5256typedef struct FREE_BLOCK {
@@ -124,7 +128,6 @@ VIRTUALInitialize( void )
124128 TRACE ( " Initializing the Virtual Critical Sections. \n " );
125129
126130 InternalInitializeCriticalSection (&virtual_critsec);
127- InternalInitializeCriticalSection (&virtual_realloc);
128131
129132 pVirtualMemory = NULL ;
130133 pVirtualMemoryLastFound = NULL ;
@@ -864,6 +867,101 @@ static BOOL VIRTUALStoreAllocationInfo(
864867 return bRetVal;
865868}
866869
870+ /* ***
871+ * VIRTUALUpdateAllocationInfo()
872+ *
873+ * Updates the allocation information in the linked list for an existing region.
874+ * NOTE: The caller must own the critical section virtual_critsec.
875+ */
876+ static BOOL VIRTUALUpdateAllocationInfo (
877+ IN PCMI pExistingEntry, /* Existing entry in the virtual memory regions list that we are currently managing. */
878+ IN UINT_PTR startBoundary, /* The new starting address of the region. */
879+ IN SIZE_T memSize) /* The new size of the region. */
880+ {
881+ BOOL bRetVal = TRUE ;
882+ SIZE_T nBufferSize = 0 ;
883+
884+ if ((memSize & VIRTUAL_PAGE_MASK) != 0 )
885+ {
886+ ERROR (" The memory size was not in multiples of the page size. \n " );
887+ bRetVal = FALSE ;
888+ goto done;
889+ }
890+
891+ if (!pExistingEntry)
892+ {
893+ ERROR (" Existing region not provided.\n " );
894+ bRetVal = FALSE ;
895+ goto done;
896+ }
897+
898+ pExistingEntry->startBoundary = startBoundary;
899+ pExistingEntry->memSize = memSize;
900+
901+ nBufferSize = memSize / VIRTUAL_PAGE_SIZE / CHAR_BIT;
902+ if ((memSize / VIRTUAL_PAGE_SIZE) % CHAR_BIT != 0 )
903+ {
904+ nBufferSize++;
905+ }
906+
907+ // Cleaup previous structures as we will need to reinitialize these using the new size of the region.
908+ {
909+ #if MMAP_DOESNOT_ALLOW_REMAP
910+ if (pExistingEntry->pDirtyPages ) InternalFree (pExistingEntry->pDirtyPages );
911+ pExistingEntry->pDirtyPages = NULL ;
912+ #endif // MMAP_DOESNOT_ALLOW_REMAP
913+
914+ if (pExistingEntry->pProtectionState ) InternalFree (pExistingEntry->pProtectionState );
915+ pExistingEntry->pProtectionState = NULL ;
916+
917+ if (pExistingEntry->pAllocState ) InternalFree (pExistingEntry->pAllocState );
918+ pExistingEntry->pAllocState = NULL ;
919+ }
920+
921+ pExistingEntry->pAllocState = (BYTE*)InternalMalloc (nBufferSize);
922+ pExistingEntry->pProtectionState = (BYTE*)InternalMalloc ((memSize / VIRTUAL_PAGE_SIZE));
923+ #if MMAP_DOESNOT_ALLOW_REMAP
924+ pExistingEntry->pDirtyPages = (BYTE*)InternalMalloc (nBufferSize);
925+ #endif // MMAP_DOESNOT_ALLOW_REMAP
926+
927+ if (pExistingEntry->pAllocState && pExistingEntry->pProtectionState
928+ #if MMAP_DOESNOT_ALLOW_REMAP
929+ && pExistingEntry->pDirtyPages
930+ #endif // MMAP_DOESNOT_ALLOW_REMAP
931+ )
932+ {
933+ /* Set the intial allocation state, and initial allocation protection. */
934+ #if MMAP_DOESNOT_ALLOW_REMAP
935+ memset (pExistingEntry->pDirtyPages , 0 , nBufferSize);
936+ #endif // pExistingEntry
937+ VIRTUALSetAllocState (MEM_RESERVE, 0 , nBufferSize * CHAR_BIT, pExistingEntry);
938+ memset (pExistingEntry->pProtectionState ,
939+ VIRTUALConvertWinFlags (pExistingEntry->accessProtection ),
940+ memSize / VIRTUAL_PAGE_SIZE);
941+ }
942+ else
943+ {
944+ ERROR (" Unable to allocate memory for the structure.\n " );
945+ bRetVal = FALSE ;
946+
947+ #if MMAP_DOESNOT_ALLOW_REMAP
948+ if (pExistingEntry->pDirtyPages ) InternalFree (pExistingEntry->pDirtyPages );
949+ pExistingEntry->pDirtyPages = NULL ;
950+ #endif //
951+
952+ if (pExistingEntry->pProtectionState ) InternalFree (pExistingEntry->pProtectionState );
953+ pExistingEntry->pProtectionState = NULL ;
954+
955+ if (pExistingEntry->pAllocState ) InternalFree (pExistingEntry->pAllocState );
956+ pExistingEntry->pAllocState = NULL ;
957+
958+ goto done;
959+ }
960+
961+ done:
962+ return bRetVal;
963+ }
964+
867965/* *****
868966 *
869967 * VIRTUALReserveMemory() - Helper function that actually reserves the memory.
@@ -1695,6 +1793,128 @@ VirtualAlloc_(
16951793 return pRetVal;
16961794}
16971795
1796+ /* ++
1797+ Function:
1798+ VirtualFreeEnclosing_ This method tries to free memory enclosing a 64K aligned region an already RESERVED larger region.
1799+ This is to be used specifically when we attempt to get a 64K aligned address on new VirtualAlloc allocations.
1800+
1801+ --*/
1802+ BOOL
1803+ VirtualFreeEnclosing_ (
1804+ IN LPVOID lpRegionStartAddress, /* Starting address of the original region. */
1805+ IN SIZE_T dwSize, /* Size of the requested region i.e. the intended size of the VirtualAlloc call.*/
1806+ IN SIZE_T dwAlignmentSize, /* The intended alignment of the returned address. This is also the size of the extra memory reserved i.e. 64KB in our case whenw e try a 64K alignment. */
1807+ IN LPVOID lpActualAlignedStartAddress) /* Actual starting address that will be returned for the new allocation. */
1808+ {
1809+ BOOL bRetVal = TRUE ;
1810+
1811+ #ifdef DEBUG
1812+ _ASSERTE (lpActualAlignedStartAddress >= lpRegionStartAddress);
1813+ _ASSERTE (lpActualAlignedStartAddress < (char *)lpRegionStartAddress + dwSize + dwAlignmentSize);
1814+ #endif
1815+
1816+ char * beforeRegionStart = (char *) lpRegionStartAddress;
1817+ size_t beforeRegionSize = (char *) lpActualAlignedStartAddress - beforeRegionStart;
1818+ char * afterRegionStart = (char *) lpActualAlignedStartAddress + dwSize;
1819+ size_t afterRegionSize = dwAlignmentSize - beforeRegionSize;
1820+
1821+ bool beforeRegionFreed = false ;
1822+ bool afterRegionFreed = (afterRegionSize == 0 );
1823+
1824+ #ifdef DEBUG
1825+ _ASSERTE (dwSize + dwAlignmentSize == beforeRegionSize + dwSize + afterRegionSize);
1826+
1827+ SIZE_T alignmentDiff = ((ULONG_PTR)lpRegionStartAddress % dwAlignmentSize);
1828+ size_t beforeRegionSize2 = dwAlignmentSize - alignmentDiff;
1829+ _ASSERTE (beforeRegionSize == beforeRegionSize2);
1830+ #endif
1831+
1832+ CPalThread *pthrCurrent;
1833+ pthrCurrent = InternalGetCurrentThread ();
1834+
1835+ if (dwSize == 0 )
1836+ {
1837+ ERROR (" dwSize must be non-zero when releasing enclosing memory region.\n " );
1838+ pthrCurrent->SetLastError (ERROR_INVALID_PARAMETER);
1839+ return FALSE ;
1840+ }
1841+
1842+ InternalEnterCriticalSection (pthrCurrent, &virtual_critsec);
1843+
1844+ PCMI pMemoryToBeReleased =
1845+ VIRTUALFindRegionInformation ((UINT_PTR)lpRegionStartAddress);
1846+
1847+ if (!pMemoryToBeReleased)
1848+ {
1849+ ERROR (" lpRegionStartAddress must be the base address returned by VirtualAlloc.\n " );
1850+ pthrCurrent->SetLastError (ERROR_INVALID_ADDRESS);
1851+ bRetVal = FALSE ;
1852+ goto VirtualFreeEnclosingExit;
1853+ }
1854+
1855+ TRACE (" Releasing the following memory %d to %d.\n " , beforeRegionStart, beforeRegionSize);
1856+
1857+ #if (MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP)
1858+ if (mmap ((void *)beforeRegionStart, beforeRegionSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE, gBackingFile ,
1859+ (char *)beforeRegionStart - (char *)gBackingBaseAddress ) != MAP_FAILED)
1860+ #else // MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP
1861+ if (munmap ((LPVOID)beforeRegionStart, beforeRegionSize) == 0 )
1862+ #endif // MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP
1863+ {
1864+ beforeRegionFreed = true ;
1865+ }
1866+ else
1867+ {
1868+ #if MMAP_IGNORES_HINT
1869+ ASSERT (" Unable to remap the memory onto the backing file; "
1870+ " error is %d.\n " , errno);
1871+ #else // MMAP_IGNORES_HINT
1872+ ASSERT (" Unable to unmap the memory, munmap() returned "
1873+ " an abnormal value.\n " );
1874+ #endif // MMAP_IGNORES_HINT
1875+ pthrCurrent->SetLastError (ERROR_INTERNAL_ERROR);
1876+ bRetVal = FALSE ;
1877+ goto VirtualFreeEnclosingExit;
1878+ }
1879+
1880+ if (!afterRegionFreed)
1881+ {
1882+ TRACE (" Releasing the following memory %d to %d.\n " , afterRegionStart, afterRegionSize);
1883+ #if (MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP)
1884+ if (mmap ((void *)afterRegionStart, afterRegionSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE, gBackingFile ,
1885+ (char *)afterRegionStart - (char *)gBackingBaseAddress ) != MAP_FAILED)
1886+ #else // MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP
1887+ if (munmap ((LPVOID)afterRegionStart, afterRegionSize) == 0 )
1888+ #endif // MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP
1889+ {
1890+ afterRegionFreed = true ;
1891+ }
1892+ else
1893+ {
1894+ #if MMAP_IGNORES_HINT
1895+ ASSERT (" Unable to remap the memory onto the backing file; "
1896+ " error is %d.\n " , errno);
1897+ #else // MMAP_IGNORES_HINT
1898+ ASSERT (" Unable to unmap the memory, munmap() returned "
1899+ " an abnormal value.\n " );
1900+ #endif // MMAP_IGNORES_HINT
1901+ pthrCurrent->SetLastError (ERROR_INTERNAL_ERROR);
1902+ bRetVal = FALSE ;
1903+ goto VirtualFreeEnclosingExit;
1904+ }
1905+ }
1906+
1907+ if (beforeRegionFreed && afterRegionFreed)
1908+ {
1909+ bRetVal = VIRTUALUpdateAllocationInfo (pMemoryToBeReleased, (UINT_PTR)lpActualAlignedStartAddress, dwSize);
1910+ goto VirtualFreeEnclosingExit;
1911+ }
1912+
1913+ VirtualFreeEnclosingExit:
1914+ InternalLeaveCriticalSection (pthrCurrent, &virtual_critsec);
1915+ return bRetVal;
1916+ }
1917+
// Alignment granularities used by VirtualAlloc when handing out
// 64K-aligned reservations.
#define KB64 (64 * 1024)   // 64 KB
#define MB64 (KB64 * 1024) // 64 MB
@@ -1717,41 +1937,56 @@ VirtualAlloc(
17171937 if (reserve || commit)
17181938 {
17191939 char *address = (char *) VirtualAlloc_ (nullptr , dwSize, MEM_RESERVE, flProtect);
1720- if (!address) return nullptr ;
1940+ if (!address)
1941+ {
1942+ return nullptr ;
1943+ }
17211944
17221945 if (reserve)
17231946 {
17241947 flAllocationType &= ~MEM_RESERVE;
17251948 }
17261949
17271950 SIZE_T diff = ((ULONG_PTR)address % KB64);
1728- if ( diff != 0 )
1951+ if (diff != 0 )
17291952 {
1953+ // Free the previously allocated address as it's not 64K aligned.
17301954 VirtualFree (address, 0 , MEM_RELEASE);
1731- char *addr64 = address + (KB64 - diff);
1732-
1733- // try reserving from the same address space
1734- address = (char *) VirtualAlloc_ (addr64, dwSize, MEM_RESERVE, flProtect);
17351955
            // The initial reservation was not 64K-aligned. Reserve a larger
            // region (dwSize + 64K) so that a 64K-aligned sub-region of
            // dwSize bytes is guaranteed to fit inside it.
1958+ address = (char *)VirtualAlloc_ (nullptr , dwSize + KB64, MEM_RESERVE, flProtect);
17361959 if (!address)
1737- { // looks like ``pushed new address + dwSize`` is not available
1738- // try on a bigger surface
1739- address = (char *) VirtualAlloc_ (nullptr , dwSize + KB64, MEM_RESERVE, flProtect);
1740- if (!address) return nullptr ;
1741-
1742- diff = ((ULONG_PTR)address % KB64);
1743- addr64 = address + (KB64 - diff);
1960+ {
1961+ // This is an actual OOM.
1962+ return nullptr ;
1963+ }
17441964
1745- CPalThread *pthrCurrent = InternalGetCurrentThread ();
1746- InternalEnterCriticalSection (pthrCurrent, &virtual_realloc);
1747- VirtualFree (address, 0 , MEM_RELEASE);
1748- address = (char *) VirtualAlloc_ (addr64, dwSize, MEM_RESERVE, flProtect);
1749- InternalLeaveCriticalSection (pthrCurrent, &virtual_realloc);
1965+ diff = ((ULONG_PTR)address % KB64);
1966+ char * addr64 = address + (KB64 - diff);
17501967
1751- if (!address) return nullptr ;
1968+ // Free the regions enclosing the 64K aligned region we intend to use.
1969+ if (VirtualFreeEnclosing_ (address, dwSize, KB64, addr64) == 0 )
1970+ {
1971+ ASSERT (" Unable to unmap the enclosing memory.\n " );
1972+ return nullptr ;
17521973 }
1974+
1975+ address = addr64;
1976+ #ifdef DEBUG
1977+ InterlockedIncrement (&attempt2);
1978+ #endif
1979+ }
1980+ #ifdef DEBUG
1981+ else
1982+ {
1983+ InterlockedIncrement (&attempt1);
17531984 }
1985+ #endif
17541986
1987+ #ifdef DEBUG
1988+ TRACE (" VirtualAlloc 64K alignment attempts: %d : %d \n " , attempt1.RawValue (), attempt2.RawValue ());
1989+ #endif
17551990 if (flAllocationType == 0 ) return address;
17561991 lpAddress = address;
17571992 }
0 commit comments