Vault 4.1
vmemorytracker.cpp
00001 /*
00002 Copyright ©1997-2014 Trygve Isaacson. All rights reserved.
00003 This file is part of the Code Vault version 4.1
00004 http://www.bombaydigital.com/
00005 License: MIT. See LICENSE.md in the Vault top level directory.
00006 */
00007 
00010 #include "vtypes.h"
00011 
00012 #ifdef VAULT_MEMORY_ALLOCATION_TRACKING_SUPPORT
00013 
00014 /*
00015 Memory leak tracking facility.
00016 THEORY OF OPERATION:
00017 
00018 This entire facility is enabled by defining VAULT_MEMORY_ALLOCATION_TRACKING_SUPPORT in vconfigure.h.
00019 
00020 In vtypes.h, we redefine "new" with a C preprocessor macro. Our version performs this replacement:
00021 
00022   new Foo  --->  new(__FILE__, __LINE__) Foo
00023 
00024 This means that code exposed to our macro (that is, code that includes vtypes.h) will call our
00025 versions of operator new defined here:
00026   void* operator new(size_t size, const char* file, int line)
00027   void* operator new[](size_t size, const char* file, int line)
00028 
00029 Any code not exposed to our macro will use the standard operator new, and we will not see
00030 those memory allocations here.
00031 
00032 We also define a replacement for the global operator delete and delete[].
00033 
00034 Our new uses malloc(). Our delete uses free().
00035 
00036 Our implementations keep track of each malloc'ed allocation, by storing an AllocationRecord in a map.
00037 The map maps the (const void*) pointer value returned by malloc() to the (const AllocationRecord*)
00038 holding our extra information.
00039 In the delete, we remove the AllocationRecord from the map and delete it.
00040 
00041 We protect our data structures with a mutex. However, for efficiency, we don't lock the mutex just
00042 to check our enable flag. The flag is only changed by the user's debugging command, and when it's off
00043 we don't want any extra overhead. So our check of the flag is non-atomic, but we don't care.
00044 
00045 There are several important things we do here to protect against infinite recursion and other
00046 problems:
00047 1. When locking our mutex, we must pass the global empty string, so that it does not
00048    itself allocate memory for the string buffer.
00049 2. When reporting, we locally turn off allocation tracking, so that the reporting code
00050    itself does not mutate our data structures.
00051 3. We must anticipate our delete being called for pointers we did not new. These might be pointers
00052    allocated when tracking was disabled, or by system libraries.
00053 4. Here in the implementation file, we have to #undef new and V_NEW so that when we do memory allocation
00054    here we aren't recursing into our tracking code. We get the original global new.
00055 5. Any code here that executes while holding our mutex, must be careful to guarantee that it does not
00056    invoke other code here that acquires the mutex (such as allocating via our new), because that will
00057    create a recursion deadlock (except on Windows where mutex locks allow recursion).
00058 6. So far, it seems that we have to make sure we don't get involved at static initialization time or
00059    static termination time. Avoiding use at static init is easily achieved by requiring our feature to be
00060    explicitly turned on at runtime. Avoiding use at static termination is not so automatic. To make this
00061    easy, we define our interface class VMemoryTracker to disable tracking on destruction. The recommended
00062    procedure is to simply declare a VMemoryTracker on the stack in the application's main() function. The
00063    application can enable tracking there, or let some later runtime command control it. When the main
00064    function ends, the destructor will turn off tracking before static termination time.
00065 
00066 */
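/*
To make the recommended procedure above concrete, here is an illustrative sketch (not part of the
original file; the header name is an assumption) of an application using this facility:

    #include "vmemorytracker.h"   // assumed name of the header declaring VMemoryTracker

    int main(int argc, char** argv) {
        VMemoryTracker memoryTracker(false);  // on the stack, so its destructor runs before static termination
        // ... application startup ...
        VMemoryTracker::enable();             // or leave it off and let a later runtime debugging command enable it
        // ... run the application ...
        return 0;                             // ~VMemoryTracker disables tracking and frees the records
    }
*/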
00067 
00068 #include "vmutex.h"
00069 #include "vmutexlocker.h"
00070 #include "vhex.h"
00071 #include "vtextiostream.h"
00072 #include "vlogger.h"
00073 #include "vthread.h"
00074 
00075 #undef V_NEW
00076 #undef new
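/*
For reference, a sketch (an assumption -- vtypes.h is not shown here) of the macro redefinition
that the two #undef directives above are undoing:

    void* operator new(size_t size, const char* file, int line);
    void* operator new[](size_t size, const char* file, int line);
    #define V_NEW new(__FILE__, __LINE__)
    #define new V_NEW

With those macros in effect, "new Foo(...)" in client code compiles as "new(__FILE__, __LINE__) Foo(...)".
Undefining both here lets this file allocate with the ordinary global operator new.
*/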
00077 
00085 class AllocationRecord {
00086     public:
00087         AllocationRecord(void* pointer, bool isArray, size_t size, const char* file, int line, const char* stackCrawlInfo)
00088             : mAllocationNumber(0)
00089             , mWhen()
00090             , mPointer(pointer)
00091             , mIsArray(isArray)
00092             , mSize(size)
00093             , mFile(file)
00094             , mLine(line)
00095             , mStackCrawlInfo(stackCrawlInfo)
00096             {}
00097         ~AllocationRecord() { // Note that we don't own mPointer or mFile.
00098             delete [] mStackCrawlInfo;
00099         }
00100 
00101         mutable Vs64    mAllocationNumber;  ///< Sequence number assigned when the record is added to the map.
00102         VInstant        mWhen;              ///< Time at which the allocation was made.
00103         void*           mPointer;           ///< Pointer returned by malloc(); not owned by this record.
00104         bool            mIsArray;           ///< True if the allocation came from operator new[].
00105         size_t          mSize;              ///< Number of bytes requested.
00106         const char*     mFile;              ///< __FILE__ of the allocating call site; not owned by this record.
00107         int             mLine;              ///< __LINE__ of the allocating call site.
00108         const char*     mStackCrawlInfo;    ///< Optional stack crawl text; owned and deleted by this record.
00109 
00110         // Need operator< to support STL sort(). We sort by mAllocationNumber.
00111         friend inline bool operator<(const AllocationRecord& r1, const AllocationRecord& r2);
00112 };
00113 
00114 inline bool operator<(const AllocationRecord& r1, const AllocationRecord& r2) { return r1.mAllocationNumber < r2.mAllocationNumber; }
00115 
00116 typedef std::map<const void*, const AllocationRecord*> AllocationMap;
00117 
00123 class AllocationRecordPtr {
00124     public:
00125         AllocationRecordPtr(const AllocationRecord* r)
00126             : mPtr(r)
00127             {}
00128         ~AllocationRecordPtr() {} // Note that we don't own mPtr.
00129 
00130         const AllocationRecord* mPtr; 
00131 
00132         // Need operator< to support STL sort(). We sort by mAllocationNumber.
00133         friend inline bool operator<(const AllocationRecordPtr& r1, const AllocationRecordPtr& r2);
00134 };
00135 
00136 inline bool operator<(const AllocationRecordPtr& r1, const AllocationRecordPtr& r2) { return r1.mPtr->mAllocationNumber < r2.mPtr->mAllocationNumber; }
00137 
00138 typedef std::vector<AllocationRecordPtr> AllocationPtrVector;
00139 
00145 class CodeLocation {
00146     public:
00147         CodeLocation(const VString& file, int line)
00148             : mFile(file)
00149             , mLine(line)
00150             {}
00151         ~CodeLocation() {}
00152 
00153         VString mFile;
00154         int mLine;
00155 };
00156 
00157 typedef std::vector<CodeLocation> CodeLocationVector;
00158 
00159 // Private global variables.
00160 static AllocationMap gAllocationMap;                // Maps each tracked pointer to its AllocationRecord.
00161 static CodeLocationVector gStackCrawlCodeLocations; // Call sites for which a stack crawl is captured at allocation time.
00162 static bool   gTrackMemory = false;                 // Master switch: true while tracking is enabled.
00163 static bool   gInsideLockedMutex = false;           // True while reset()/reportMemoryTracking() holds the mutex, so our delete hook stays out.
00164 static Vs64   gNextAllocationNumber = 1;            // Monotonically increasing id assigned to each tracked allocation.
00165 static int    gMaxNumAllocations = 50000;           // Tracking turns itself off once this many allocations are being tracked (< 1 means no limit).
00166 static int    gCurrentNumAllocations = 0;           // Number of allocations currently being tracked.
00167 static VDuration gExpirationDuration = 15 * VDuration::MINUTE(); // How long tracking stays on after enable() (ZERO means it never expires).
00168 static VInstant gExpirationTime;                    // Instant at which tracking turns itself off.
00169 static VMutex gAllocationMapMutex("gAllocationMapMutex", true); // 2nd param MUST be true to prevent this mutex from logging (which would allocate memory).
00170 static VString REPORT_LABEL("MEMORY REPORT");
00171 // Optional size filter: only allocations whose size falls strictly between these two values are tracked.
00172 static size_t gTrackAllocationsOver = 0;
00173 static size_t gTrackAllocationsUnder = V_MAX_SIZE;
00174 
00178 static void _putToMap(const void* p, const AllocationRecord* r) {
00179     VMutexLocker locker(&gAllocationMapMutex, VString::EMPTY());
00180     r->mAllocationNumber = gNextAllocationNumber++; // We do this manually so that AllocationRecord ctor doesn't also need to lock
00181     ++gCurrentNumAllocations;
00182     gAllocationMap[p] = r;
00183 }
00184 
00189 static void _removeFromMap(const void* p) {
00190     VMutexLocker locker(&gAllocationMapMutex, VString::EMPTY());
00191     --gCurrentNumAllocations;
00192     gAllocationMap[p] = NULL;
00193 }
00194 
00200 static void _stripFileName(VString& path) {
00201     int lastSlash = path.lastIndexOf('\\');
00202     if (lastSlash == -1) {
00203         lastSlash = path.lastIndexOf('/');
00204     }
00205 
00206     if (lastSlash != -1) {
00207         path.substringInPlace(lastSlash + 1);
00208     }
00209 }
00210 
00216 static const char* _getFileNamePtr(const char* path) {
00217     int length = (int) ::strlen(path);
00218     // Walk backwards until we find a slash or backslash.
00219     int offset = length - 1;
00220     while ((offset >= 0) && (path[offset] != '/') && (path[offset] != '\\')) {
00221         --offset;
00222     }
00223 
00224     return path + offset + 1;
00225 }
00226 
00230 static bool _isCodeLocationCrawlEnabled(const char* file, int line) {
00231     // Fast exit if no code locations are crawl-enabled.
00232     if (gStackCrawlCodeLocations.empty()) {
00233         return false;
00234     }
00235 
00236     // Note that we can't use VString here because we are being called during memory allocation, and
00237     // a VString used here will itself allocate memory and we'll have infinite recursion. Use low-level
00238     // C char operations.
00239     const char* fileName = _getFileNamePtr(file);
00240     for (CodeLocationVector::const_iterator i = gStackCrawlCodeLocations.begin(); i != gStackCrawlCodeLocations.end(); ++i) {
00241         if (((*i).mFile == fileName) && ((*i).mLine == line)) {
00242             return true;
00243         }
00244     }
00245 
00246     return false;
00247 }
00248 
00252 static const AllocationRecord* _getFromMap(const void* p) {
00253     VMutexLocker locker(&gAllocationMapMutex, VString::EMPTY());
00254     return gAllocationMap[p];
00255 }
00256 
00257 VMemoryTracker::VMemoryTracker(bool enableAtStart) {
00258     if (enableAtStart) {
00259         VMemoryTracker::enable();
00260     }
00261 }
00262 
00263 VMemoryTracker::~VMemoryTracker() {
00264     VMemoryTracker::disable();
00265     VMemoryTracker::reset();
00266 }
00267 
00268 // static
00269 void VMemoryTracker::enable() {
00270     // Set the expiration first to avoid a race.
00271     if (gExpirationDuration == VDuration::ZERO()) {
00272         gExpirationTime = VInstant::INFINITE_FUTURE();
00273     } else {
00274         gExpirationTime = VInstant(/*now*/) + gExpirationDuration;
00275     }
00276 
00277     gTrackMemory = true;
00278 }
00279 
00280 // static
00281 void VMemoryTracker::disable() {
00282     gTrackMemory = false;
00283 }
00284 
00285 // static
00286 void VMemoryTracker::reset() {
00287     VMutexLocker locker(&gAllocationMapMutex, VString::EMPTY());
00288     bool wasTracking = gTrackMemory;
00289     gTrackMemory = false;
00290     gInsideLockedMutex = true; // prevents our deletes from triggering delete processing while we hold the mutex
00291 
00292     for (AllocationMap::const_iterator i = gAllocationMap.begin(); i != gAllocationMap.end(); ++i) {
00293         const void* p = i->first;
00294         const AllocationRecord* r = i->second;
00295         if (r != NULL) {
00296             gAllocationMap[p] = NULL;
00297             delete r;
00298         }
00299     }
00300 
00301     gAllocationMap.clear();
00302     gCurrentNumAllocations = 0;
00303     gInsideLockedMutex = false;
00304     gTrackMemory = wasTracking;
00305 }
00306 
00307 // static
00308 bool VMemoryTracker::isEnabled() {
00309     return gTrackMemory;
00310 }
00311 
00312 // static
00313 void VMemoryTracker::setLimit(int maxNumAllocations) {
00314     gMaxNumAllocations = maxNumAllocations;
00315 }
00316 
00317 // static
00318 int VMemoryTracker::getLimit() {
00319     return gMaxNumAllocations;
00320 }
00321 
00322 // static
00323 void VMemoryTracker::setOver(size_t newOver) {
00324     gTrackAllocationsOver = newOver;
00325 }
00326 
00327 // static
00328 size_t VMemoryTracker::getOver() {
00329     return gTrackAllocationsOver;
00330 }
00331 
00332 // static
00333 void VMemoryTracker::setUnder(size_t newUnder) {
00334     gTrackAllocationsUnder = newUnder == 0 ? V_MAX_SIZE : newUnder;
00335 }
00336 
00337 // static
00338 size_t VMemoryTracker::getUnder() {
00339     return gTrackAllocationsUnder;
00340 }
00341 
00342 Vs64 VMemoryTracker::getAllocationNumber() {
00343     return gNextAllocationNumber;
00344 }
00345 
00346 // static
00347 void VMemoryTracker::setExpiration(const VDuration& d) {
00348     gExpirationDuration = d;
00349 
00350     if (gExpirationDuration == VDuration::ZERO()) {
00351         gExpirationTime = VInstant::INFINITE_FUTURE();
00352     } else {
00353         gExpirationTime = VInstant(/*now*/) + gExpirationDuration;
00354     }
00355 }
00356 
00357 // static
00358 Vs64 VMemoryTracker::getExpirationTime() {
00359     return gExpirationTime.getValue();
00360 }
00361 
00362 // static
00363 Vs64 VMemoryTracker::getExpirationMilliseconds() {
00364     return gExpirationDuration.getDurationMilliseconds();
00365 }
00366 
00367 // static
00368 void VMemoryTracker::omitPointer(const void* p) {
00369     const AllocationRecord* r = _getFromMap(p);
00370     if (r != NULL) {
00371         _removeFromMap(p);
00372         delete r;
00373     }
00374 }
00375 
00376 // static
00377 void VMemoryTracker::enableCodeLocationCrawl(const VString& file, int line) {
00378     for (CodeLocationVector::const_iterator i = gStackCrawlCodeLocations.begin(); i != gStackCrawlCodeLocations.end(); ++i) {
00379         if (((*i).mFile == file) && ((*i).mLine == line)) {
00380             return;
00381         }
00382     }
00383 
00384     // Not already enabled -- add it.
00385     gStackCrawlCodeLocations.push_back(CodeLocation(file, line));
00386 }
00387 
00388 // static
00389 void VMemoryTracker::disableCodeLocationCrawl(const VString& file, int line) {
00390     for (CodeLocationVector::iterator i = gStackCrawlCodeLocations.begin(); i != gStackCrawlCodeLocations.end(); ++i) {
00391         if (((*i).mFile == file) && ((*i).mLine == line)) {
00392             gStackCrawlCodeLocations.erase(i);
00393             return;
00394         }
00395     }
00396 }
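/*
Illustrative only (the file name and line number are hypothetical): to capture a stack crawl for
every allocation made at a particular call site, a debugging command could call

    VMemoryTracker::enableCodeLocationCrawl("vbufferedstream.cpp", 58);

and later undo it with disableCodeLocationCrawl() using the same arguments. Note that allocations
are matched against the bare file name, because _isCodeLocationCrawlEnabled() strips the path from
__FILE__ before comparing.
*/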
00397 
00398 static void* _allocateMemory(size_t size, const char* file, int line, bool isArray) {
00399     void* p = ::malloc(size); // Would prefer to call through to global new: ::operator new(size) or new[](size)
00400 
00401     // When malloc fails, it returns null. But new should throw std::bad_alloc().
00402     if (p == NULL) {
00403         throw std::bad_alloc();
00404     }
00405 
00406     if (gTrackMemory) {
00407         if (size <= gTrackAllocationsOver || size >= gTrackAllocationsUnder) {
00408             return p;
00409         }
00410 
00411         // The most efficient way to check expiration is to go ahead and create the allocation record,
00412         // and use its timestamp. This way we don't also read the clock a 2nd time.
00413         if ((gMaxNumAllocations < 1) || (gCurrentNumAllocations < gMaxNumAllocations)) {
00414             const char* stackCrawlInfo = NULL;
00415             if (_isCodeLocationCrawlEnabled(file, line)) {
00416                 VStringLoggerPtr logger(new VStringLogger(VString::EMPTY(), VLoggerLevel::TRACE, false));
00417                 VThread::logStackCrawl(VString::EMPTY(), logger, false);
00418                 stackCrawlInfo = logger->orphanLines();
00419             }
00420             const AllocationRecord* r = new AllocationRecord(p, isArray, size, file, line, stackCrawlInfo);
00421             _putToMap(p, r);
00422 
00423             if ((gExpirationDuration != VDuration::ZERO()) && (r->mWhen > gExpirationTime)) {
00424                 gTrackMemory = false;
00425             }
00426 
00427         } else {
00428             gTrackMemory = false;
00429         }
00430     }
00431     return p;
00432 }
00433 
00434 static void _freeMemory(void* p, bool /*isArray*/) {
00435     if (gTrackMemory || (!gInsideLockedMutex && (gCurrentNumAllocations > 0))) { // We must honor deletions that might be in our map, even if tracking is disabled. Unless we're inside our own mutex lock block.
00436         const AllocationRecord* r = _getFromMap(p);
00437         if (r != NULL) {
00438             _removeFromMap(p);
00439             delete r;
00440         }
00441     }
00442 
00443     ::free(p); // Would prefer to call through to global delete: ::operator delete(p) or delete[](p)
00444 }
00445 
00446 void* operator new(size_t size, const char* file, int line) {
00447     return _allocateMemory(size, file, line, false);
00448 }
00449 
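// Note: the compiler invokes this matching "placement" form of operator delete only if a constructor
// throws inside a new(__FILE__, __LINE__) expression; ordinary delete statements call the global form
// defined below. The same applies to the array forms further down.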
00450 void operator delete(void* p, const char* /*file*/, int /*line*/) {
00451     if (p == NULL) {
00452         return;
00453     }
00454 
00455     _freeMemory(p, false);
00456 }
00457 
00458 void operator delete(void* p) throw() {
00459     if (p == NULL) {
00460         return;
00461     }
00462 
00463     _freeMemory(p, false);
00464 }
00465 
00466 void* operator new[](size_t size, const char* file, int line) {
00467     return _allocateMemory(size, file, line, true);
00468 }
00469 
00470 void operator delete[](void* p, const char* /*file*/, int /*line*/) {
00471     if (p == NULL) {
00472         return;
00473     }
00474 
00475     _freeMemory(p, true);
00476 }
00477 
00478 void operator delete[](void* p) throw() {
00479     if (p == NULL) {
00480         return;
00481     }
00482 
00483     _freeMemory(p, true);
00484 }
00485 
00486 static void _reportText(const VString& s, bool toLogger, bool toConsole, VTextIOStream* toStream) {
00487     if (toLogger) {
00488         VLOGGER_NAMED_INFO("vault.VMemoryTracker", s);
00489     }
00490 
00491     if (toConsole) {
00492         std::cout << s << std::endl;
00493     }
00494 
00495     if (toStream != NULL) {
00496         toStream->writeLine(s);
00497     }
00498 }
00499 
00500 void VMemoryTracker::reportMemoryTracking(const VString& label, bool toLogger, bool toConsole, VTextIOStream* toStream, Vs64 bufferLengthLimit, bool showDetails, bool /*performAnalysis*/) {
00501     VMemoryTracker::omitPointer(label.getDataBufferConst()); // don't include the label in the report
00502 
00503     VMutexLocker locker(&gAllocationMapMutex, VString::EMPTY());
00504     gInsideLockedMutex = true; // prevents our deletes from triggering delete processing while we hold the mutex
00505     Vs64 numObjects = 0;
00506     size_t numBytes = 0;
00507     bool wasTracking = gTrackMemory;
00508     gTrackMemory = false;
00509     VDuration duration;
00510 
00511     /* local scope */ {
00512         // scope for "records" to guarantee STL deletes stuff before we re-enable gTrackMemory at end of function
00513         VInstant start;
00514         AllocationPtrVector records;
00515 
00516         // First pass is to gather the objects into a vector we can sort.
00517         for (AllocationMap::const_iterator i = gAllocationMap.begin(); i != gAllocationMap.end(); ++i) {
00518             // void* p = i->first;
00519             const AllocationRecord* r = i->second;
00520             if (r != NULL) {
00521                 records.push_back(AllocationRecordPtr(r));
00522             }
00523         }
00524 
00525         std::sort(records.begin(), records.end());
00526 
00527         // Second pass uses the vector and prints info about each record.
00528         _reportText(VSTRING_FORMAT("----- START %s", (label.isEmpty() ? REPORT_LABEL.chars() : label.chars())), toLogger, toConsole, toStream);
00529         _reportText(VSTRING_FORMAT(" Tracked object limit=%d, tracked object count=%d.", gMaxNumAllocations, gCurrentNumAllocations), toLogger, toConsole, toStream);
00530         for (AllocationPtrVector::const_iterator i = records.begin(); i != records.end(); ++i) {
00531             const AllocationRecord* r = (*i).mPtr;
00532             if (r != NULL) {
00533                 ++numObjects;
00534                 numBytes += r->mSize;
00535                 const Vu8* dataPtr = static_cast<const Vu8*>(r->mPointer);
00536                 Vs64 hexDumpLength = static_cast<Vs64>(r->mSize);
00537                 hexDumpLength = V_MIN(bufferLengthLimit, hexDumpLength);
00538 
00539                 // Strip off the front of the full file path, leaving just the file name. Could be DOS or Unix separators present.
00540                 VString fileName(r->mFile);
00541                 _stripFileName(fileName);
00542 
00543                 try {
00544                     if (showDetails) {
00545                         VString timeString = r->mWhen.getLocalString();
00546                         VString summary(VSTRING_ARGS(" [" VSTRING_FORMATTER_S64 "] [%s] 0x%08X " VSTRING_FORMATTER_SIZE " bytes @%s:%d", r->mAllocationNumber, timeString.chars(), r->mPointer, r->mSize, fileName.chars(), r->mLine));
00547 
00548                         if (r->mStackCrawlInfo != NULL) {
00549                             summary += VString::NATIVE_LINE_ENDING();
00550                             summary += r->mStackCrawlInfo;
00551                         }
00552 
00553                         if (toLogger) {
00554                             VLOGGER_NAMED_HEXDUMP("vault.VMemoryTracker", VLoggerLevel::INFO, summary, dataPtr, hexDumpLength);
00555                         }
00556 
00557                         if (toConsole) {
00558                             std::cout << summary.chars() << std::endl;
00559                             VHex hexDump(NULL);
00560                             hexDump.printHex(dataPtr, hexDumpLength);
00561                         }
00562 
00563                         if (toStream) {
00564                             toStream->writeLine(summary);
00565                             VHex hexDump(toStream);
00566                             hexDump.printHex(dataPtr, hexDumpLength);
00567                         }
00568 
00569                     } else {
00570                         VString hexString;
00571                         VString asciiChars;
00572                         if (dataPtr != NULL && hexDumpLength > 0) {
00573                             VHex::bufferToHexString(dataPtr, hexDumpLength, hexString, false);
00574                             VHex::bufferToPrintableASCIIString(dataPtr, hexDumpLength, asciiChars);
00575                         }
00576                         VString summary(VSTRING_ARGS(" [" VSTRING_FORMATTER_S64 "] 0x%08X " VSTRING_FORMATTER_SIZE " bytes @%s:%d %s %s", r->mAllocationNumber, r->mPointer, r->mSize, fileName.chars(), r->mLine, asciiChars.chars(), hexString.chars()));
00577 
00578                         if (r->mStackCrawlInfo != NULL) {
00579                             summary += " ... was allocated by:";
00580                             summary += VString::NATIVE_LINE_ENDING();
00581                             summary += r->mStackCrawlInfo;
00582                         }
00583 
00584                         _reportText(summary, toLogger, toConsole, toStream);
00585                     }
00586 
00587                 } catch (...) {
00588                     VString summary(VSTRING_ARGS(" [" VSTRING_FORMATTER_S64 "] 0x%08X " VSTRING_FORMATTER_SIZE " bytes @%s:%d **EXCEPTION GETTING DETAILS**", r->mAllocationNumber, r->mPointer, r->mSize, fileName.chars(), r->mLine));
00589                     _reportText(summary, toLogger, toConsole, toStream);
00590                 }
00591             }
00592         }
00593 
00594         VInstant end;
00595         duration = end - start;
00596     } // end of artificial scope ensuring "records" vector is cleaned up early
00597 
00598     _reportText(VSTRING_FORMAT(" Total objects found: " VSTRING_FORMATTER_S64 " objects, " VSTRING_FORMATTER_SIZE " bytes. %s", numObjects, numBytes, duration.getDurationString().chars()), toLogger, toConsole, toStream);
00599 
00600     if (!wasTracking && (numObjects > 0)) { // Remind user that we still need to monitor deletes while our map has records.
00601         _reportText("WARNING: There is still some performance overhead until you 'reset' the tracked memory.", false/*only scare interactive user, not log readers*/, toConsole, toStream);
00602     }
00603 
00604     _reportText(VSTRING_FORMAT("----- END %s", (label.isEmpty() ? REPORT_LABEL.chars() : label.chars())), toLogger, toConsole, toStream);
00605 
00606     gInsideLockedMutex = false;
00607     gTrackMemory = wasTracking;
00608 }
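/*
An illustrative call (assumed typical usage, not taken from the original file): write a summary of
everything currently tracked to the logger and the console, limiting each hex dump to 64 bytes and
using the compact one-line-per-allocation format. Using the memoryTracker instance from the earlier
main() sketch:

    memoryTracker.reportMemoryTracking("LEAK CHECK", true, true, NULL, 64, false, false);
*/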
00609 
00610 #endif /* VAULT_MEMORY_ALLOCATION_TRACKING_SUPPORT */
00611 
