|
7 | 7 | #include <typeinfo>
|
8 | 8 |
|
9 | 9 | #include "platform/platform.hpp"
|
| 10 | +#include "platform/memory_mapping.hpp" |
10 | 11 | #include "utils/error.hpp"
|
11 | 12 | #include "utils/microfmt.hpp"
|
12 | 13 | #include "utils/utils.hpp"
|
13 | 14 | #include "logging.hpp"
|
14 | 15 | #include "unwind/unwind.hpp"
|
15 | 16 |
|
16 | 17 | #ifndef _MSC_VER
|
17 |
| - #include <string.h> |
18 |
| - #if IS_WINDOWS |
19 |
| - #ifndef WIN32_LEAN_AND_MEAN |
20 |
| - #define WIN32_LEAN_AND_MEAN |
21 |
| - #endif |
22 |
| - #include <windows.h> |
23 |
| - #else |
24 |
| - #include <sys/mman.h> |
25 |
| - #include <unistd.h> |
26 |
| - #if IS_APPLE |
27 |
| - #include <mach/mach.h> |
28 |
| - #ifdef CPPTRACE_HAS_MACH_VM |
29 |
| - #include <mach/mach_vm.h> |
30 |
| - #endif |
31 |
| - #else |
32 |
| - #include <fstream> |
33 |
| - #include <ios> |
34 |
| - #endif |
35 |
| - #endif |
| 18 | + #include <cstring> |
36 | 19 | #endif
|
37 | 20 |
|
38 | 21 | CPPTRACE_BEGIN_NAMESPACE
|
@@ -230,242 +213,6 @@ namespace detail {
|
230 | 213 | #warning "Cpptrace from_current: Unrecognized C++ standard library, from_current() won't be supported"
|
231 | 214 | constexpr size_t vtable_size = 0;
|
232 | 215 | #endif
|
233 |
| - |
234 |
| - #if IS_WINDOWS |
235 |
| - int get_page_size() { |
236 |
| - SYSTEM_INFO info; |
237 |
| - GetSystemInfo(&info); |
238 |
| - return info.dwPageSize; |
239 |
| - } |
240 |
// Platform-neutral aliases for the page-protection flags used below, so the
// shared logic doesn't reference PAGE_* constants directly.
constexpr auto memory_readonly = PAGE_READONLY;
constexpr auto memory_readwrite = PAGE_READWRITE;
242 |
| - int mprotect_page_and_return_old_protections(void* page, int page_size, int protections) { |
243 |
| - DWORD old_protections; |
244 |
| - if(!VirtualProtect(page, page_size, protections, &old_protections)) { |
245 |
| - throw internal_error( |
246 |
| - "VirtualProtect call failed: {}", |
247 |
| - std::system_error(GetLastError(), std::system_category()).what() |
248 |
| - ); |
249 |
| - } |
250 |
| - return old_protections; |
251 |
| - } |
252 |
| - void mprotect_page(void* page, int page_size, int protections) { |
253 |
| - mprotect_page_and_return_old_protections(page, page_size, protections); |
254 |
| - } |
255 |
| - void* allocate_page(int page_size) { |
256 |
| - auto page = VirtualAlloc(nullptr, page_size, MEM_COMMIT | MEM_RESERVE, memory_readwrite); |
257 |
| - if(!page) { |
258 |
| - throw internal_error( |
259 |
| - "VirtualAlloc call failed: {}", |
260 |
| - std::system_error(GetLastError(), std::system_category()).what() |
261 |
| - ); |
262 |
| - } |
263 |
| - return page; |
264 |
| - } |
265 |
| - #else |
266 |
// Determine the system's virtual-memory page size, in bytes.
// Prefers the POSIX sysconf query; falls back to the legacy getpagesize().
int get_page_size() {
    #if defined(_SC_PAGESIZE)
    return static_cast<int>(sysconf(_SC_PAGESIZE));
    #else
    return getpagesize();
    #endif
}
273 |
// Platform-neutral aliases for the page-protection flag sets used below, so
// the shared logic doesn't reference PROT_* constants directly.
constexpr auto memory_readonly = PROT_READ;
constexpr auto memory_readwrite = PROT_READ | PROT_WRITE;
275 |
| - #if IS_APPLE |
276 |
// Look up the current protection flags (PROT_* bits) of the VM region
// containing `page` by querying mach for the region's basic info.
// Throws internal_error if mach reports the address as unmapped.
// https://stackoverflow.com/a/12627784/15675011
int get_page_protections(void* page) {
    // The mach_vm_* API is preferred when available; otherwise fall back to
    // the older vm_* types and vm_region_64.
    #ifdef CPPTRACE_HAS_MACH_VM
    mach_vm_size_t vmsize;
    mach_vm_address_t address = (mach_vm_address_t)page;
    #else
    vm_size_t vmsize;
    vm_address_t address = (vm_address_t)page;
    #endif
    vm_region_basic_info_data_t info;
    // 64-bit processes must pass the 64-bit flavor's count
    mach_msg_type_number_t info_count =
        sizeof(size_t) == 8 ? VM_REGION_BASIC_INFO_COUNT_64 : VM_REGION_BASIC_INFO_COUNT;
    memory_object_name_t object;
    kern_return_t status =
        #ifdef CPPTRACE_HAS_MACH_VM
        mach_vm_region
        #else
        vm_region_64
        #endif
        (
            mach_task_self(),
            &address,
            &vmsize,
            VM_REGION_BASIC_INFO,
            (vm_region_info_t)&info,
            &info_count,
            &object
        );
    if(status == KERN_INVALID_ADDRESS) {
        throw internal_error("vm_region failed with KERN_INVALID_ADDRESS");
    }
    // Translate mach VM_PROT_* bits into the POSIX PROT_* flags used elsewhere
    int perms = 0;
    if(info.protection & VM_PROT_READ) {
        perms |= PROT_READ;
    }
    if(info.protection & VM_PROT_WRITE) {
        perms |= PROT_WRITE;
    }
    if(info.protection & VM_PROT_EXECUTE) {
        perms |= PROT_EXEC;
    }
    return perms;
}
319 |
| - #else |
320 |
| - // Code for reading /proc/self/maps |
321 |
| - // Unfortunately this is the canonical and only way to get memory permissions on linux |
322 |
| - // It comes with some surprising behaviors. Because it's a pseudo-file and maps could update at any time, reads of |
323 |
| - // the file can tear. The surprising observable behavior here is overlapping ranges: |
324 |
| - // - https://unix.stackexchange.com/questions/704987/overlapping-address-ranges-in-proc-maps |
325 |
| - // - https://stackoverflow.com/questions/59737950/what-is-the-correct-way-to-get-a-consistent-snapshot-of-proc-pid-smaps |
326 |
| - // Additional info: |
327 |
| - // Note: reading /proc/PID/maps or /proc/PID/smaps is inherently racy (consistent |
328 |
| - // output can be achieved only in the single read call). |
329 |
| - // This typically manifests when doing partial reads of these files while the |
330 |
| - // memory map is being modified. Despite the races, we do provide the following |
331 |
| - // guarantees: |
332 |
| - // |
333 |
| - // 1) The mapped addresses never go backwards, which implies no two |
334 |
| - // regions will ever overlap. |
335 |
| - // 2) If there is something at a given vaddr during the entirety of the |
336 |
| - // life of the smaps/maps walk, there will be some output for it. |
337 |
| - // |
338 |
| - // https://www.kernel.org/doc/Documentation/filesystems/proc.txt |
339 |
| - // Ideally we could do everything as a single read() call but I don't think that's practical, especially given that |
340 |
| - // the kernel has limited buffers internally. While we shouldn't be modifying mapped memory while reading |
341 |
| - // /proc/self/maps here, it's theoretically possible that we could allocate and that could go to the OS for more |
342 |
| - // pages. |
343 |
| - // While reading this is inherently racy, as far as I can tell tears don't happen within a line but they can happen |
344 |
| - // between lines. |
345 |
| - // The code that writes /proc/pid/maps: |
346 |
| - // - https://github.com/torvalds/linux/blob/3d0ebc36b0b3e8486ceb6e08e8ae173aaa6d1221/fs/proc/task_mmu.c#L304-L365 |
347 |
| - |
348 |
// One mapped region parsed from /proc/self/maps: the half-open address span
// [low, high) together with its PROT_* permission bits.
struct address_range {
    uintptr_t low;
    uintptr_t high;
    int perms;
    // Ranges are ordered by starting address so a sorted vector of them can
    // be searched efficiently.
    bool operator<(const address_range& rhs) const {
        return low < rhs.low;
    }
};
356 |
| - |
357 |
// Parse a single line of /proc/self/maps from `stream`.
// Line shape: "low-high rwxp offset dev inode [path]". The stream must
// already be in std::hex mode (the caller sets this) for the address fields.
// Returns nullopt on eof; throws internal_error on malformed input.
optional<address_range> read_map_entry(std::ifstream& stream) {
    uintptr_t start;
    uintptr_t stop;
    stream>>start;
    stream.ignore(1); // dash
    stream>>stop;
    // eof is checked before fail: hitting end-of-file on the first field of a
    // new line is the normal termination condition, not an error
    if(stream.eof()) {
        return nullopt;
    }
    if(stream.fail()) {
        throw internal_error("Failure reading /proc/self/maps");
    }
    stream.ignore(1); // space
    char r, w, x; // there's a private/shared flag after these but we don't need it
    stream>>r>>w>>x;
    if(stream.fail() || stream.eof()) {
        throw internal_error("Failure reading /proc/self/maps");
    }
    // Each permission character is either its letter or '-' when unset
    int perms = 0;
    if(r == 'r') {
        perms |= PROT_READ;
    }
    if(w == 'w') {
        perms |= PROT_WRITE;
    }
    if(x == 'x') {
        perms |= PROT_EXEC;
    }
    // Discard the rest of the line (offset, device, inode, pathname)
    stream.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
    return address_range{start, stop, perms};
}
389 |
| - |
390 |
| - // returns a vector or nullopt if a tear is detected |
391 |
| - optional<std::vector<address_range>> try_load_mapped_region_info() { |
392 |
| - std::ifstream stream("/proc/self/maps"); |
393 |
| - stream>>std::hex; |
394 |
| - std::vector<address_range> ranges; |
395 |
| - while(auto entry = read_map_entry(stream)) { |
396 |
| - const auto& range = entry.unwrap(); |
397 |
| - VERIFY(range.low <= range.high); |
398 |
| - if(!ranges.empty()) { |
399 |
| - const auto& last_range = ranges.back(); |
400 |
| - if(range.low < last_range.high) { |
401 |
| - return nullopt; |
402 |
| - } |
403 |
| - } |
404 |
| - ranges.push_back(range); |
405 |
| - } |
406 |
| - return ranges; |
407 |
| - } |
408 |
| - |
409 |
| - // we can allocate during try_load_mapped_region_info, in theory that could cause a tear |
410 |
| - optional<std::vector<address_range>> try_load_mapped_region_info_with_retries(int n) { |
411 |
| - VERIFY(n > 0); |
412 |
| - for(int i = 0; i < n; i++) { |
413 |
| - if(auto info = try_load_mapped_region_info()) { |
414 |
| - return info; |
415 |
| - } |
416 |
| - } |
417 |
| - throw internal_error("Couldn't successfully load /proc/self/maps after {} retries", n); |
418 |
| - } |
419 |
| - |
420 |
// Lazily load and cache the /proc/self/maps snapshot; later calls return the
// cached vector.
// Note: has_loaded is set *before* the load is attempted, so if the load
// throws, the failure is not retried and later calls see an empty vector.
// NOTE(review): the has_loaded flag itself is not synchronized — this looks
// like it assumes the first call is serialized; confirm against callers.
const std::vector<address_range>& load_mapped_region_info() {
    static std::vector<address_range> regions;
    static bool has_loaded = false;
    if(!has_loaded) {
        has_loaded = true;
        if(auto info = try_load_mapped_region_info_with_retries(2)) {
            regions = std::move(info).unwrap();
        }
    }
    return regions;
}
431 |
| - |
432 |
| - int get_page_protections(void* page) { |
433 |
| - const auto& mapped_region_info = load_mapped_region_info(); |
434 |
| - auto it = first_less_than_or_equal( |
435 |
| - mapped_region_info.begin(), |
436 |
| - mapped_region_info.end(), |
437 |
| - reinterpret_cast<uintptr_t>(page), |
438 |
| - [](uintptr_t a, const address_range& b) { |
439 |
| - return a < b.low; |
440 |
| - } |
441 |
| - ); |
442 |
| - if(it == mapped_region_info.end()) { |
443 |
| - throw internal_error( |
444 |
| - "Failed to find mapping for {>16:0h} in /proc/self/maps", |
445 |
| - reinterpret_cast<uintptr_t>(page) |
446 |
| - ); |
447 |
| - } |
448 |
| - return it->perms; |
449 |
| - } |
450 |
| - #endif |
451 |
| - void mprotect_page(void* page, int page_size, int protections) { |
452 |
| - if(mprotect(page, page_size, protections) != 0) { |
453 |
| - throw internal_error("mprotect call failed: {}", strerror(errno)); |
454 |
| - } |
455 |
| - } |
456 |
| - int mprotect_page_and_return_old_protections(void* page, int page_size, int protections) { |
457 |
| - auto old_protections = get_page_protections(page); |
458 |
| - mprotect_page(page, page_size, protections); |
459 |
| - return old_protections; |
460 |
| - } |
461 |
| - void* allocate_page(int page_size) { |
462 |
| - auto page = mmap(nullptr, page_size, memory_readwrite, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); |
463 |
| - if(page == MAP_FAILED) { |
464 |
| - throw internal_error("mmap call failed: {}", strerror(errno)); |
465 |
| - } |
466 |
| - return page; |
467 |
| - } |
468 |
| - #endif |
469 | 216 | void perform_typeinfo_surgery(const std::type_info& info, bool(*do_catch_function)(const std::type_info*, const std::type_info*, void**, unsigned)) {
|
470 | 217 | if(vtable_size == 0) { // set to zero if we don't know what standard library we're working with
|
471 | 218 | return;
|
@@ -532,7 +279,7 @@ namespace detail {
|
532 | 279 | }
|
533 | 280 |
|
534 | 281 | // make our own copy of the vtable
|
535 |
| - memcpy(new_vtable_page, type_info_vtable_pointer, vtable_size * sizeof(void*)); |
| 282 | + std::memcpy(new_vtable_page, type_info_vtable_pointer, vtable_size * sizeof(void*)); |
536 | 283 | // ninja in the custom __do_catch interceptor
|
537 | 284 | auto new_vtable = static_cast<void**>(new_vtable_page);
|
538 | 285 | // double cast is done here because older (and some newer gcc versions) warned about it under -Wpedantic
|
|
0 commit comments