Issue 246: Dynamic_images.* needs to be 64-bit ready. Added types that are typedef'd to the appropriate underlying types depending on 32-bit or 64-bit compilation, and modified dynamic_images to use these new types. Tested 32-bit minidump generation. Also did some code cleanup along the way, and removed all blank lines that contained spaces.
git-svn-id: http://google-breakpad.googlecode.com/svn/trunk@253 4c0a9323-5329-0410-9bdc-e9ce6186880e
parent fc816a3b3a
commit 867df1c652
5 changed files with 369 additions and 190 deletions
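The width-selecting typedefs themselves appear in the dynamic_images.h hunk below. As a rough illustration of the approach only (not code from this commit; FindTextSegment and kBreakpadSegmentCommand are hypothetical names, with the segment-command constant matched to the header width):

    #include <mach-o/loader.h>
    #include <stdint.h>
    #include <string.h>

    #ifdef __LP64__
    typedef struct mach_header_64      breakpad_mach_header;
    typedef struct segment_command_64  breakpad_mach_segment_command;
    static const uint32_t kBreakpadSegmentCommand = LC_SEGMENT_64;
    #else
    typedef struct mach_header         breakpad_mach_header;
    typedef struct segment_command     breakpad_mach_segment_command;
    static const uint32_t kBreakpadSegmentCommand = LC_SEGMENT;
    #endif

    // Walk the load commands of an image mapped at |header| and return its
    // __TEXT segment; the same source compiles for 32- and 64-bit targets.
    static const breakpad_mach_segment_command *
    FindTextSegment(const breakpad_mach_header *header) {
      const struct load_command *cmd =
          reinterpret_cast<const struct load_command *>(header + 1);
      for (uint32_t i = 0; i < header->ncmds; ++i) {
        if (cmd->cmd == kBreakpadSegmentCommand) {
          const breakpad_mach_segment_command *seg =
              reinterpret_cast<const breakpad_mach_segment_command *>(cmd);
          if (!strcmp(seg->segname, "__TEXT"))
            return seg;
        }
        cmd = reinterpret_cast<const struct load_command *>(
            reinterpret_cast<const char *>(cmd) + cmd->cmdsize);
      }
      return NULL;
    }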
@@ -33,6 +33,8 @@ extern "C" { // needed to compile on Leopard
   #include <stdio.h>
 }
 
 #include <dlfcn.h>
+#include <mach/mach_vm.h>
+#include <algorithm>
 #include "client/mac/handler/dynamic_images.h"
 
@@ -46,27 +48,30 @@ namespace google_breakpad {
 // first in order to handle cases when we're reading strings and they
 // straddle two vm regions.
 //
-static vm_size_t GetMemoryRegionSize(task_port_t target_task,
+static mach_vm_size_t GetMemoryRegionSize(task_port_t target_task,
                                      const void* address,
-                                     vm_size_t *size_to_end) {
-  vm_address_t region_base = (vm_address_t)address;
-  vm_size_t region_size;
+                                     mach_vm_size_t *size_to_end) {
+  mach_vm_address_t region_base = (mach_vm_address_t)address;
+  mach_vm_size_t region_size;
   natural_t nesting_level = 0;
   vm_region_submap_info submap_info;
   mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT;
 
   // Get information about the vm region containing |address|
+  vm_region_recurse_info_t region_info;
+  region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);
 
   kern_return_t result =
-    vm_region_recurse(target_task,
+    mach_vm_region_recurse(target_task,
                       &region_base,
                       &region_size,
                       &nesting_level,
-                      reinterpret_cast<vm_region_recurse_info_t>(&submap_info),
+                      region_info,
                       &info_count);
 
   if (result == KERN_SUCCESS) {
     // Get distance from |address| to the end of this region
-    *size_to_end = region_base + region_size -(vm_address_t)address;
+    *size_to_end = region_base + region_size -(mach_vm_address_t)address;
 
     // If we want to handle strings as long as 4096 characters we may need
     // to check if there's a vm region immediately following the first one.
@@ -74,18 +79,17 @@ static vm_size_t GetMemoryRegionSize(task_port_t target_task,
     // of the second region.
     if (*size_to_end < 4096) {
       // Second region starts where the first one ends
-      vm_address_t region_base2 =
-        (vm_address_t)(region_base + region_size);
-      vm_size_t region_size2;
+      mach_vm_address_t region_base2 =
+        (mach_vm_address_t)(region_base + region_size);
+      mach_vm_size_t region_size2;
 
       // Get information about the following vm region
       result =
-        vm_region_recurse(
-                          target_task,
+        mach_vm_region_recurse(target_task,
                           &region_base2,
                           &region_size2,
                           &nesting_level,
-                          reinterpret_cast<vm_region_recurse_info_t>(&submap_info),
+                          region_info,
                           &info_count);
 
       // Extend region_size to go all the way to the end of the 2nd region
@@ -95,7 +99,7 @@ static vm_size_t GetMemoryRegionSize(task_port_t target_task,
       }
     }
 
-    *size_to_end = region_base + region_size -(vm_address_t)address;
+    *size_to_end = region_base + region_size -(mach_vm_address_t)address;
   } else {
     region_size = 0;
     *size_to_end = 0;
@@ -116,11 +120,11 @@ static void* ReadTaskString(task_port_t target_task,
   // the string is. And we don't know how long the string is, until we've read
   // the memory!  So, we'll try to read kMaxStringLength bytes
   // (or as many bytes as we can until we reach the end of the vm region).
-  vm_size_t size_to_end;
+  mach_vm_size_t size_to_end;
   GetMemoryRegionSize(target_task, address, &size_to_end);
 
   if (size_to_end > 0) {
-    vm_size_t size_to_read =
+    mach_vm_size_t size_to_read =
       size_to_end > kMaxStringLength ? kMaxStringLength : size_to_end;
 
     return ReadTaskMemory(target_task, address, size_to_read);
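For orientation, both ReadTaskString() and ReadTaskMemory() return a malloc'd local copy of the remote bytes, so the caller owns and frees the buffer. A rough usage sketch (not part of the commit; |task| and |load_address| stand in for the values the dyld image walk further down supplies):

    breakpad_mach_header *header = reinterpret_cast<breakpad_mach_header *>(
        ReadTaskMemory(task, load_address, sizeof(breakpad_mach_header)));
    if (header) {
      printf("magic: 0x%x  ncmds: %u\n", header->magic, (unsigned)header->ncmds);
      free(header);  // ReadTaskMemory allocates with malloc; the caller frees it
    }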
@@ -136,14 +140,21 @@ void* ReadTaskMemory(task_port_t target_task,
                      const void* address,
                      size_t length) {
   void* result = NULL;
-  vm_address_t page_address = reinterpret_cast<vm_address_t>(address) & (-4096);
-  vm_address_t last_page_address =
-    (reinterpret_cast<vm_address_t>(address) + length + 4095) & (-4096);
-  vm_size_t page_size = last_page_address - page_address;
+  int systemPageSize = getpagesize();
+
+  // use the negative of the page size for the mask to find the page address
+  mach_vm_address_t page_address =
+    reinterpret_cast<mach_vm_address_t>(address) & (-systemPageSize);
+
+  mach_vm_address_t last_page_address =
+    (reinterpret_cast<mach_vm_address_t>(address) + length +
+     (systemPageSize - 1)) & (-systemPageSize);
+
+  mach_vm_size_t page_size = last_page_address - page_address;
   uint8_t* local_start;
   uint32_t local_length;
 
-  kern_return_t r = vm_read(target_task,
+  kern_return_t r = mach_vm_read(target_task,
                             page_address,
                             page_size,
                             reinterpret_cast<vm_offset_t*>(&local_start),
@@ -152,9 +163,11 @@ void* ReadTaskMemory(task_port_t target_task,
   if (r == KERN_SUCCESS) {
     result = malloc(length);
     if (result != NULL) {
-      memcpy(result, &local_start[(uint32_t)address - page_address], length);
+      memcpy(result,
+             &local_start[(mach_vm_address_t)address - page_address],
+             length);
     }
-    vm_deallocate(mach_task_self(), (uintptr_t)local_start, local_length);
+    mach_vm_deallocate(mach_task_self(), (uintptr_t)local_start, local_length);
   }
 
   return result;
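The page rounding above works because, for a power-of-two page size, -systemPageSize is an all-ones mask with the low bits cleared, so ANDing with it rounds an address down to a page boundary. A standalone sketch of the same arithmetic (illustrative only, not code from the commit; PageBounds is a hypothetical helper):

    #include <mach/mach.h>
    #include <unistd.h>

    // Round [address, address + length) out to whole pages, as ReadTaskMemory
    // does before handing the range to mach_vm_read().
    static void PageBounds(const void *address, size_t length,
                           mach_vm_address_t *first_page,
                           mach_vm_size_t *page_span) {
      mach_vm_address_t addr = reinterpret_cast<mach_vm_address_t>(address);
      int page = getpagesize();                    // typically 4096
      *first_page = addr & (-page);                // round down to page start
      mach_vm_address_t last_page =
          (addr + length + (page - 1)) & (-page);  // round up to next boundary
      *page_span = last_page - *first_page;
    }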
@@ -165,15 +178,31 @@ void* ReadTaskMemory(task_port_t target_task,
 //==============================================================================
 // Initializes vmaddr_, vmsize_, and slide_
 void DynamicImage::CalculateMemoryInfo() {
-  mach_header *header = GetMachHeader();
+  breakpad_mach_header *header = GetMachHeader();
+
+  // unless we can process the header, ensure that calls to
+  // IsValid() will return false
+  vmaddr_ = 0;
+  vmsize_ = 0;
+  slide_ = 0;
+
+#if __LP64__
+  if(header->magic != MH_MAGIC_64) {
+    return;
+  }
+#else
+  if(header->magic != MH_MAGIC) {
+    return;
+  }
+#endif
 
   const struct load_command *cmd =
     reinterpret_cast<const struct load_command *>(header + 1);
 
   for (unsigned int i = 0; cmd && (i < header->ncmds); ++i) {
     if (cmd->cmd == LC_SEGMENT) {
-      const struct segment_command *seg =
-        reinterpret_cast<const struct segment_command *>(cmd);
+      const breakpad_mach_segment_command *seg =
+        reinterpret_cast<const breakpad_mach_segment_command *>(cmd);
 
       if (!strcmp(seg->segname, "__TEXT")) {
         vmaddr_ = seg->vmaddr;
@@ -191,10 +220,6 @@ void DynamicImage::CalculateMemoryInfo() {
       (reinterpret_cast<const char *>(cmd) + cmd->cmdsize);
   }
 
-  // we failed - a call to IsValid() will return false
-  vmaddr_ = 0;
-  vmsize_ = 0;
-  slide_ = 0;
 }
 
 void DynamicImage::Print() {
@@ -203,11 +228,11 @@ void DynamicImage::Print() {
     path = "(unknown)";
   }
   printf("%p: %s\n", GetLoadAddress(), path);
-  mach_header *header = GetMachHeader();
+  breakpad_mach_header *header = GetMachHeader();
   MachHeader(*header).Print();
   printf("vmaddr\t\t: %p\n", reinterpret_cast<void*>(GetVMAddr()));
-  printf("vmsize\t\t: %d\n", GetVMSize());
-  printf("slide\t\t: %d\n", GetVMAddrSlide());
+  printf("vmsize\t\t: %llu\n", GetVMSize());
+  printf("slide\t\t: %td\n", GetVMAddrSlide());
 }
 
 #pragma mark -
@@ -261,8 +286,10 @@ void DynamicImages::ReadImageInfoForTask() {
       dyld_image_info &info = infoArray[i];
 
       // First read just the mach_header from the image in the task.
-      mach_header *header = reinterpret_cast<mach_header*>
-        (ReadTaskMemory(task_, info.load_address_, sizeof(mach_header)));
+      breakpad_mach_header *header = reinterpret_cast<breakpad_mach_header*>
+        (ReadTaskMemory(task_,
+                        info.load_address_,
+                        sizeof(breakpad_mach_header)));
 
       if (!header)
         break;   // bail on this dynamic image
@@ -270,10 +297,12 @@ void DynamicImages::ReadImageInfoForTask() {
       // Now determine the total amount we really want to read based on the
       // size of the load commands.  We need the header plus all of the
       // load commands.
-      unsigned int header_size = sizeof(mach_header) + header->sizeofcmds;
+      unsigned int header_size =
+        sizeof(breakpad_mach_header) + header->sizeofcmds;
 
       free(header);
 
-      header = reinterpret_cast<mach_header*>
+      header = reinterpret_cast<breakpad_mach_header*>
         (ReadTaskMemory(task_, info.load_address_, header_size));
 
       // Read the file name from the task's memory space.
@@ -287,9 +316,10 @@ void DynamicImages::ReadImageInfoForTask() {
       }
 
       // Create an object representing this image and add it to our list.
-      DynamicImage *new_image = new DynamicImage(header,
+      DynamicImage *new_image;
+      new_image = new DynamicImage(header,
                                    header_size,
-                                   info.load_address_,
+                                   (breakpad_mach_header*)info.load_address_,
                                    file_path,
                                    info.file_mod_date_,
                                    task_);
@@ -68,13 +68,23 @@ typedef struct dyld_all_image_infos {
   bool processDetachedFromSharedRegion;
 } dyld_all_image_infos;
 
+// some typedefs to isolate 64/32 bit differences
+#ifdef __LP64__
+typedef mach_header_64 breakpad_mach_header;
+typedef segment_command_64 breakpad_mach_segment_command;
+#else
+typedef mach_header breakpad_mach_header;
+typedef segment_command breakpad_mach_segment_command;
+#endif
+
+
 //==============================================================================
 // A simple wrapper for a mach_header
 //
 // This could be fleshed out with some more interesting methods.
 class MachHeader {
  public:
-  explicit MachHeader(const mach_header &header) : header_(header) {}
+  explicit MachHeader(const breakpad_mach_header &header) : header_(header) {}
 
   void Print() {
     printf("magic\t\t: %4x\n", header_.magic);
@@ -86,16 +96,16 @@ class MachHeader {
     printf("flags\t\t: %d\n", header_.flags);
   }
 
-  mach_header header_;
+  breakpad_mach_header header_;
 };
 
 //==============================================================================
 // Represents a single dynamically loaded mach-o image
 class DynamicImage {
  public:
-  DynamicImage(mach_header *header,       // we take ownership
+  DynamicImage(breakpad_mach_header *header,       // we take ownership
                int header_size,           // includes load commands
-               mach_header *load_address,
+               breakpad_mach_header *load_address,
               char *inFilePath,
               uintptr_t image_mod_date,
               mach_port_t task)
@@ -116,7 +126,7 @@ class DynamicImage {
   }
 
   // Returns pointer to a local copy of the mach_header plus load commands
-  mach_header *GetMachHeader() {return header_;}
+  breakpad_mach_header *GetMachHeader() {return header_;}
 
   // Size of mach_header plus load commands
   int GetHeaderSize() const {return header_size_;}
@@ -127,16 +137,16 @@ class DynamicImage {
   uintptr_t GetModDate() const {return file_mod_date_;}
 
   // Actual address where the image was loaded
-  mach_header *GetLoadAddress() const {return load_address_;}
+  breakpad_mach_header *GetLoadAddress() const {return load_address_;}
 
   // Address where the image should be loaded
-  uint32_t GetVMAddr() const {return vmaddr_;}
+  mach_vm_address_t GetVMAddr() const {return vmaddr_;}
 
   // Difference between GetLoadAddress() and GetVMAddr()
   ptrdiff_t GetVMAddrSlide() const {return slide_;}
 
   // Size of the image
-  uint32_t GetVMSize() const {return vmsize_;}
+  mach_vm_size_t GetVMSize() const {return vmsize_;}
 
   // Task owning this loaded image
   mach_port_t GetTask() {return task_;}
@@ -183,17 +193,19 @@ class DynamicImage {
     InitializeFilePath(inInfo.GetFilePath());
 
     // copy mach_header and load commands
-    header_ = reinterpret_cast<mach_header*>(malloc(inInfo.header_size_));
+    void *headerBuffer = malloc(inInfo.header_size_);
+    header_ = reinterpret_cast<breakpad_mach_header*>(headerBuffer);
+
     memcpy(header_, inInfo.header_, inInfo.header_size_);
     header_size_ = inInfo.header_size_;
   }
 #endif
 
-  mach_header *header_;          // our local copy of the header
+  breakpad_mach_header *header_; // our local copy of the header
   int header_size_;              // mach_header plus load commands
-  mach_header *load_address_;    // base address image is mapped into
-  uint32_t vmaddr_;
-  uint32_t vmsize_;
+  breakpad_mach_header *load_address_;  // base address image is mapped into
+  mach_vm_address_t vmaddr_;
+  mach_vm_size_t vmsize_;
   ptrdiff_t slide_;
 
   char *file_path_;              // path dyld used to load the image
@@ -211,7 +223,8 @@ class DynamicImage {
 class DynamicImageRef {
  public:
   explicit DynamicImageRef(DynamicImage *inP) : p(inP) {}
-  DynamicImageRef(const DynamicImageRef &inRef) : p(inRef.p) {}  // STL required
+  // The copy constructor is required by STL
+  DynamicImageRef(const DynamicImageRef &inRef) : p(inRef.p) {}
 
   bool operator<(const DynamicImageRef &inRef) const {
     return (*const_cast<DynamicImageRef*>(this)->p)
@@ -266,10 +279,14 @@ class DynamicImages {
   }
 
   void TestPrint() {
+    const breakpad_mach_header *header;
     for (int i = 0; i < (int)image_list_.size(); ++i) {
       printf("dyld: %p: name = %s\n", _dyld_get_image_header(i),
         _dyld_get_image_name(i) );
-      const mach_header *header = _dyld_get_image_header(i);
+
+      const void *imageHeader = _dyld_get_image_header(i);
+      header = reinterpret_cast<const breakpad_mach_header*>(imageHeader);
+
       MachHeader(*header).Print();
     }
   }
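As a quick in-process illustration of the wrapper and the typedefs (a sketch only, mirroring TestPrint() above; not code from the commit):

    #include <mach-o/dyld.h>

    const breakpad_mach_header *header =
        reinterpret_cast<const breakpad_mach_header *>(_dyld_get_image_header(0));
    printf("image 0: %s\n", _dyld_get_image_name(0));
    MachHeader(*header).Print();   // dumps magic, ncmds, flags, ...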
@@ -30,6 +30,7 @@
 #include <cstdio>
 
 #include <mach/host_info.h>
+#include <mach/mach_vm.h>
 #include <mach/vm_statistics.h>
 #include <mach-o/dyld.h>
 #include <mach-o/loader.h>
@@ -59,8 +60,10 @@ MinidumpGenerator::MinidumpGenerator()
   GatherSystemInformation();
 }
 
-// constructor when generating from a different process than the crashed process
-MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task, mach_port_t handler_thread)
+// constructor when generating from a different process than the
+// crashed process
+MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task,
+                                     mach_port_t handler_thread)
     : exception_type_(0),
       exception_code_(0),
       exception_thread_(0),
@@ -94,7 +97,10 @@ void MinidumpGenerator::GatherSystemInformation() {
   CFStringRef vers_path =
     CFSTR("/System/Library/CoreServices/SystemVersion.plist");
   CFURLRef sys_vers =
-    CFURLCreateWithFileSystemPath(NULL, vers_path, kCFURLPOSIXPathStyle, false);
+    CFURLCreateWithFileSystemPath(NULL,
+                                  vers_path,
+                                  kCFURLPOSIXPathStyle,
+                                  false);
   CFDataRef data;
   SInt32 error;
   CFURLCreateDataAndPropertiesFromResource(NULL, sys_vers, &data, NULL, NULL,
@@ -207,26 +213,36 @@ bool MinidumpGenerator::Write(const char *path) {
   return result;
 }
 
-size_t MinidumpGenerator::CalculateStackSize(vm_address_t start_addr) {
-  vm_address_t stack_region_base = start_addr;
-  vm_size_t stack_region_size;
+size_t MinidumpGenerator::CalculateStackSize(mach_vm_address_t start_addr) {
+  mach_vm_address_t stack_region_base = start_addr;
+  mach_vm_size_t stack_region_size;
   natural_t nesting_level = 0;
   vm_region_submap_info submap_info;
   mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT;
 
+  vm_region_recurse_info_t region_info;
+  region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);
+
   kern_return_t result =
-    vm_region_recurse(crashing_task_, &stack_region_base, &stack_region_size,
-                      &nesting_level,
-                      reinterpret_cast<vm_region_recurse_info_t>(&submap_info),
+    mach_vm_region_recurse(crashing_task_, &stack_region_base,
+                           &stack_region_size, &nesting_level,
+                           region_info,
                            &info_count);
 
-  if ((stack_region_base + stack_region_size) == 0xbffff000) {
-    // The stack for thread 0 needs to extend all the way to 0xc0000000
-    // For many processes the stack is first created in one page
-    // from 0xbffff000 - 0xc0000000 and is then later extended to
-    // a much larger size by creating a new VM region immediately below
-    // the initial page
+  if ((stack_region_base + stack_region_size) == TOP_OF_THREAD0_STACK) {
+    // The stack for thread 0 needs to extend all the way to
+    // 0xc0000000 on 32 bit and 00007fff5fc00000 on 64bit.  HOWEVER,
+    // for many processes, the stack is first created in one page
+    // below this, and is then later extended to a much larger size by
+    // creating a new VM region immediately below the initial page.
 
-    // include the original stack frame page (0xbffff000 - 0xc0000000)
+    // You can see this for yourself by running vmmap on a "hello,
+    // world" program
+
+    // Because of the above, we'll add 4k to include the original
+    // stack frame page.
+    // This method of finding the stack region needs to be done in
+    // a better way; the breakpad issue 247 is tracking this.
     stack_region_size += 0x1000;
   }
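A quick check that the constants above line up (illustrative arithmetic only, not code from the commit):

    #include <assert.h>
    #include <stdint.h>

    static void CheckThread0StackTopConstants() {
      // 32-bit: the probed region ends at TOP_OF_THREAD0_STACK (0xbffff000) and
      // the original single stack page sits just above it, so adding one 4 KiB
      // page reaches the true top at 0xc0000000.
      assert(UINT64_C(0xbffff000) + 0x1000 == UINT64_C(0xc0000000));
      // 64-bit: the same relationship holds for the constant used here.
      assert(UINT64_C(0x00007fff5fbff000) + 0x1000 ==
             UINT64_C(0x00007fff5fc00000));
    }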
@@ -235,7 +251,7 @@ size_t MinidumpGenerator::CalculateStackSize(vm_address_t start_addr) {
 }
 
 bool MinidumpGenerator::WriteStackFromStartAddress(
-    vm_address_t start_addr,
+    mach_vm_address_t start_addr,
     MDMemoryDescriptor *stack_location) {
   UntypedMDRVA memory(&writer_);
   size_t size = CalculateStackSize(start_addr);
@@ -250,7 +266,11 @@ bool MinidumpGenerator::WriteStackFromStartAddress(
 
   bool result;
   if (dynamic_images_) {
-    void *stack_memory = ReadTaskMemory(crashing_task_, (void*)start_addr, size);
+
+    void *stack_memory = ReadTaskMemory(crashing_task_,
+                                        (void*)start_addr,
+                                        size);
+
     result = memory.Copy(stack_memory, size);
     free(stack_memory);
   } else {
@@ -263,42 +283,56 @@ bool MinidumpGenerator::WriteStackFromStartAddress(
   return result;
 }
 
-#if TARGET_CPU_PPC
+#if TARGET_CPU_PPC || TARGET_CPU_PPC64
 bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state,
                                    MDMemoryDescriptor *stack_location) {
-  ppc_thread_state_t *machine_state =
-    reinterpret_cast<ppc_thread_state_t *>(state);
-  vm_address_t start_addr = machine_state->r1;
+  breakpad_thread_state_t *machine_state =
+    reinterpret_cast<breakpad_thread_state_t *>(state);
+#if TARGET_CPU_PPC
+  mach_vm_address_t start_addr = machine_state->r1;
+#else
+  mach_vm_address_t start_addr = machine_state->__r1;
+#endif
   return WriteStackFromStartAddress(start_addr, stack_location);
 }
 
-u_int64_t MinidumpGenerator::CurrentPCForStack(breakpad_thread_state_data_t state) {
-  ppc_thread_state_t *machine_state =
-    reinterpret_cast<ppc_thread_state_t *>(state);
+u_int64_t
+MinidumpGenerator::CurrentPCForStack(breakpad_thread_state_data_t state) {
+  breakpad_thread_state_t *machine_state =
+    reinterpret_cast<breakpad_thread_state_t *>(state);
+
+#if TARGET_CPU_PPC
   return machine_state->srr0;
+#else
+  return machine_state->__srr0;
+#endif
 }
 
 bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
                                      MDLocationDescriptor *register_location) {
-  TypedMDRVA<MDRawContextPPC> context(&writer_);
-  ppc_thread_state_t *machine_state =
-    reinterpret_cast<ppc_thread_state_t *>(state);
+  TypedMDRVA<MinidumpContext> context(&writer_);
+  breakpad_thread_state_t *machine_state =
+    reinterpret_cast<breakpad_thread_state_t *>(state);
 
   if (!context.Allocate())
     return false;
 
   *register_location = context.location();
-  MDRawContextPPC *context_ptr = context.get();
+  MinidumpContext *context_ptr = context.get();
   context_ptr->context_flags = MD_CONTEXT_PPC_BASE;
 
+#if TARGET_CPU_PPC64
+#define AddReg(a) context_ptr->a = machine_state->__ ## a
+#define AddGPR(a) context_ptr->gpr[a] = machine_state->__r ## a
+#else
 #define AddReg(a) context_ptr->a = machine_state->a
 #define AddGPR(a) context_ptr->gpr[a] = machine_state->r ## a
+#endif
 
   AddReg(srr0);
   AddReg(cr);
   AddReg(xer);
   AddReg(ctr);
-  AddReg(mq);
   AddReg(lr);
   AddReg(vrsave);
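The AddReg/AddGPR token-pasting macros exist to hide the leading double underscore that the 64-bit thread-state structures put on their field names. For illustration, the expansions look like this (comment only, not code from the commit):

    // PPC64 (fields carry a "__" prefix):
    //   AddReg(srr0)  ->  context_ptr->srr0   = machine_state->__srr0;
    //   AddGPR(3)     ->  context_ptr->gpr[3] = machine_state->__r3;
    // 32-bit PPC (no prefix):
    //   AddReg(srr0)  ->  context_ptr->srr0   = machine_state->srr0;
    //   AddGPR(3)     ->  context_ptr->gpr[3] = machine_state->r3;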
@@ -334,38 +368,68 @@ bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
   AddGPR(29);
   AddGPR(30);
   AddGPR(31);
 
+#if TARGET_CPU_PPC
+  /* The mq register is only for PPC */
+  AddReg(mq);
+#endif
+
   return true;
 }
 
-#elif TARGET_CPU_X86
+#elif TARGET_CPU_X86 || TARGET_CPU_X86_64
 
 bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state,
                                    MDMemoryDescriptor *stack_location) {
-  i386_thread_state_t *machine_state =
-    reinterpret_cast<i386_thread_state_t *>(state);
-  vm_address_t start_addr = machine_state->esp;
+  breakpad_thread_state_t *machine_state =
+    reinterpret_cast<breakpad_thread_state_t *>(state);
+
+#if TARGET_CPU_X86_64
+  mach_vm_address_t start_addr = machine_state->__rsp;
+#else
+  mach_vm_address_t start_addr = machine_state->esp;
+#endif
   return WriteStackFromStartAddress(start_addr, stack_location);
 }
 
-u_int64_t MinidumpGenerator::CurrentPCForStack(breakpad_thread_state_data_t state) {
-  i386_thread_state_t *machine_state =
-    reinterpret_cast<i386_thread_state_t *>(state);
+u_int64_t
+MinidumpGenerator::CurrentPCForStack(breakpad_thread_state_data_t state) {
+  breakpad_thread_state_t *machine_state =
+    reinterpret_cast<breakpad_thread_state_t *>(state);
+
+#if TARGET_CPU_X86_64
+  return machine_state->__rip;
+#else
   return machine_state->eip;
+#endif
 }
 
 bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
                                      MDLocationDescriptor *register_location) {
-  TypedMDRVA<MDRawContextX86> context(&writer_);
-  i386_thread_state_t *machine_state =
-    reinterpret_cast<i386_thread_state_t *>(state);
+  TypedMDRVA<MinidumpContext> context(&writer_);
+  breakpad_thread_state_t *machine_state =
+    reinterpret_cast<breakpad_thread_state_t *>(state);
 
   if (!context.Allocate())
     return false;
 
   *register_location = context.location();
-  MDRawContextX86 *context_ptr = context.get();
+  MinidumpContext *context_ptr = context.get();
 
+#if TARGET_CPU_X86
   context_ptr->context_flags = MD_CONTEXT_X86;
 
 #define AddReg(a) context_ptr->a = machine_state->a
+  AddReg(eax);
+  AddReg(ebx);
+  AddReg(ecx);
+  AddReg(edx);
+  AddReg(esi);
+  AddReg(edi);
+  AddReg(ebp);
+  AddReg(esp);
+
   AddReg(cs);
   AddReg(ds);
   AddReg(ss);
@@ -375,16 +439,40 @@ bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
   AddReg(eflags);
 
   AddReg(eip);
-  AddReg(eax);
-  AddReg(ebx);
-  AddReg(ecx);
-  AddReg(edx);
-  AddReg(esi);
-  AddReg(edi);
-  AddReg(ebp);
-  AddReg(esp);
+#else
+
+#define AddReg(a) context_ptr->a = machine_state->__ ## a
+  context_ptr->context_flags = MD_CONTEXT_AMD64;
+  AddReg(rax);
+  AddReg(rbx);
+  AddReg(rcx);
+  AddReg(rdx);
+  AddReg(rdi);
+  AddReg(rsi);
+  AddReg(rbp);
+  AddReg(rsp);
+  AddReg(r8);
+  AddReg(r9);
+  AddReg(r10);
+  AddReg(r11);
+  AddReg(r12);
+  AddReg(r13);
+  AddReg(r14);
+  AddReg(r15);
+  AddReg(rip);
+  // according to AMD's software developer guide, bits above 18 are
+  // not used in the flags register.  Since the minidump format
+  // specifies 32 bits for the flags register, we can truncate safely
+  // with no loss.
+  context_ptr->eflags = machine_state->__rflags;
+  AddReg(cs);
+  AddReg(fs);
+  AddReg(gs);
+#endif
 
   return true;
 }
 
 #endif
 
 bool MinidumpGenerator::WriteThreadStream(mach_port_t thread_id,
@@ -447,7 +535,8 @@ bool MinidumpGenerator::WriteThreadListStream(
   return true;
 }
 
-bool MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
+bool
+MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
   TypedMDRVA<MDRawExceptionStream> exception(&writer_);
 
   if (!exception.Allocate())
@@ -466,7 +555,9 @@ bool MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
   breakpad_thread_state_data_t state;
   mach_msg_type_number_t stateCount = sizeof(state);
 
-  if (thread_get_state(exception_thread_, BREAKPAD_MACHINE_THREAD_STATE, state,
+  if (thread_get_state(exception_thread_,
+                       BREAKPAD_MACHINE_THREAD_STATE,
+                       state,
                        &stateCount) != KERN_SUCCESS)
     return false;
 
@@ -564,7 +655,7 @@ bool MinidumpGenerator::WriteModuleStream(unsigned int index,
   if (!image)
     return false;
 
-  const mach_header *header = image->GetMachHeader();
+  const breakpad_mach_header *header = image->GetMachHeader();
 
   if (!header)
     return false;
@@ -588,11 +679,24 @@ bool MinidumpGenerator::WriteModuleStream(unsigned int index,
     }
   } else {
     // we're getting module info in the crashed process
-    const struct mach_header *header = _dyld_get_image_header(index);
+
+    const breakpad_mach_header *header;
+    header = (breakpad_mach_header*)_dyld_get_image_header(index);
     if (!header)
       return false;
 
+#ifdef __LP64__
+    assert(header->magic == MH_MAGIC_64);
+
+    if(header->magic != MH_MAGIC_64)
+      return false;
+#else
+    assert(header->magic == MH_MAGIC);
+
+    if(header->magic != MH_MAGIC)
+      return false;
+#endif
+
     int cpu_type = header->cputype;
     unsigned long slide = _dyld_get_image_vmaddr_slide(index);
     const char* name = _dyld_get_image_name(index);
@@ -603,8 +707,10 @@ bool MinidumpGenerator::WriteModuleStream(unsigned int index,
 
     for (unsigned int i = 0; cmd && (i < header->ncmds); i++) {
       if (cmd->cmd == LC_SEGMENT) {
-        const struct segment_command *seg =
-          reinterpret_cast<const struct segment_command *>(cmd);
+
+        const breakpad_mach_segment_command *seg =
+          reinterpret_cast<const breakpad_mach_segment_command *>(cmd);
+
         if (!strcmp(seg->segname, "__TEXT")) {
           MDLocationDescriptor string_location;
 
@@ -622,7 +728,7 @@ bool MinidumpGenerator::WriteModuleStream(unsigned int index,
         }
       }
 
-      cmd = reinterpret_cast<struct load_command *>((char *)cmd + cmd->cmdsize);
+      cmd = reinterpret_cast<struct load_command*>((char *)cmd + cmd->cmdsize);
     }
   }
 
@@ -705,9 +811,6 @@ bool MinidumpGenerator::WriteModuleListStream(
     MDRawDirectory *module_list_stream) {
   TypedMDRVA<MDRawModuleList> list(&writer_);
 
-  if (!_dyld_present())
-    return false;
-
   int image_count = dynamic_images_ ?
     dynamic_images_->GetImageCount() : _dyld_image_count();
 
@@ -770,12 +873,15 @@ bool MinidumpGenerator::WriteMiscInfoStream(MDRawDirectory *misc_info_stream) {
       int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, info_ptr->process_id };
       size_t size;
       if (!sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, &size, NULL, 0)) {
-        vm_address_t addr;
-        if (vm_allocate(mach_task_self(), &addr, size, true) == KERN_SUCCESS) {
+        mach_vm_address_t addr;
+        if (mach_vm_allocate(mach_task_self(),
+                             &addr,
+                             size,
+                             true) == KERN_SUCCESS) {
           struct kinfo_proc *proc = (struct kinfo_proc *)addr;
           if (!sysctl(mib, sizeof(mib) / sizeof(mib[0]), proc, &size, NULL, 0))
             info_ptr->process_create_time = proc->kp_proc.p_starttime.tv_sec;
-          vm_deallocate(mach_task_self(), addr, size);
+          mach_vm_deallocate(mach_task_self(), addr, size);
         }
       }
 
@@ -46,6 +46,26 @@ namespace google_breakpad {
 
 using std::string;
 
+#if TARGET_CPU_X86_64 || TARGET_CPU_PPC64
+#define TOP_OF_THREAD0_STACK 0x00007fff5fbff000
+#else
+#define TOP_OF_THREAD0_STACK 0xbffff000
+#endif
+
+#if TARGET_CPU_X86_64
+typedef x86_thread_state64_t breakpad_thread_state_t;
+typedef MDRawContextAMD64 MinidumpContext;
+#elif TARGET_CPU_X86
+typedef i386_thread_state_t breakpad_thread_state_t;
+typedef MDRawContextX86 MinidumpContext;
+#elif TARGET_CPU_PPC64
+typedef ppc_thread_state64_t breakpad_thread_state_t;
+typedef MDRawContextPPC64 MinidumpContext;
+#elif TARGET_CPU_PPC
+typedef ppc_thread_state_t breakpad_thread_state_t;
+typedef MDRawContextPPC MinidumpContext;
+#endif
+
 // Creates a minidump file of the current process. If there is exception data,
 // use SetExceptionInformation() to add this to the minidump. The minidump
 // file is generated by the Write() function.
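A minimal usage sketch based only on the pieces visible in this diff (the no-argument constructor targets the current process, Write() takes the output path; the path below is arbitrary):

    google_breakpad::MinidumpGenerator md;  // in-process generator
    md.Write("/tmp/test.dmp");              // produce the minidump file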
@@ -93,7 +113,7 @@ class MinidumpGenerator {
 
   // Helpers
   u_int64_t CurrentPCForStack(breakpad_thread_state_data_t state);
-  bool WriteStackFromStartAddress(vm_address_t start_addr,
+  bool WriteStackFromStartAddress(mach_vm_address_t start_addr,
                                   MDMemoryDescriptor *stack_location);
   bool WriteStack(breakpad_thread_state_data_t state,
                   MDMemoryDescriptor *stack_location);
@@ -103,7 +123,9 @@ class MinidumpGenerator {
   bool WriteCVRecord(MDRawModule *module, int cpu_type,
                      const char *module_path);
   bool WriteModuleStream(unsigned int index, MDRawModule *module);
-  size_t CalculateStackSize(vm_address_t start_addr);
+
+  size_t CalculateStackSize(mach_vm_address_t start_addr);
+
   int FindExecutableModule();
 
   // disallow copy ctor and operator=
@@ -48,9 +48,13 @@ static void *Reporter(void *) {
   struct passwd *user = getpwuid(getuid());
 
   // Write it to the desktop
-  snprintf(buffer, sizeof(buffer), "/Users/%s/Desktop/test.dmp", user->pw_name);
-  fprintf(stdout, "Writing %s\n", buffer);
+  snprintf(buffer,
+           sizeof(buffer),
+           "/Users/%s/Desktop/test.dmp",
+           user->pw_name);
+
+  fprintf(stdout, "Writing %s\n", buffer);
   unlink(buffer);
   md.Write(buffer);
   doneWritingReport = true;