Add allocated CPU and GPU memory reporting #81
base: main
Changes from all commits
@@ -221,10 +221,15 @@ mtrtRuntimeClientGetNumDevices(MTRT_RuntimeClient client, int32_t *numDevices);
 MLIR_CAPI_EXPORTED MTRT_Status mtrtRuntimeClientGetDevice(
     MTRT_RuntimeClient client, int32_t index, MTRT_Device *device);

-/// Retrieve the runtiem client that manages the specified memref.
+/// Retrieve the runtime client that manages the specified memref.
 MLIR_CAPI_EXPORTED MTRT_RuntimeClient
 mtrtMemRefGetClient(MTRT_MemRefValue memref);

+/// Retrieve the runtime client allocated cpu and gpu memory.
+MTRT_Status mtrtReportAllocatedMemory(MTRT_RuntimeClient client,
+                                      int64_t *totalCpuMemory,
+                                      int64_t *totalGpuMemory);
Review comment: Let's use …
+
 //===----------------------------------------------------------------------===//
 // Data Transfer
 //===----------------------------------------------------------------------===//
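For orientation, the new entry point follows the out-parameter pattern used elsewhere in this C API: the caller supplies two `int64_t` slots and checks the returned `MTRT_Status`. The sketch below is illustrative only and is not part of this diff; the `mtrtStatusIsOk` check and the way the client handle is obtained are assumptions.

```cpp
// Sketch, not part of the PR. Assumes the runtime C API header declaring
// MTRT_RuntimeClient, MTRT_Status, and mtrtReportAllocatedMemory is included,
// and that a status predicate (spelled mtrtStatusIsOk here, an assumption)
// exists in the project.
#include <cstdint>
#include <cstdio>

void printTrackedMemory(MTRT_RuntimeClient client) {
  int64_t totalCpuMemory = 0;
  int64_t totalGpuMemory = 0;
  MTRT_Status s =
      mtrtReportAllocatedMemory(client, &totalCpuMemory, &totalGpuMemory);
  if (!mtrtStatusIsOk(s)) // hypothetical helper; use the project's real check
    return;
  std::printf("tracked CPU bytes: %lld, tracked GPU bytes: %lld\n",
              static_cast<long long>(totalCpuMemory),
              static_cast<long long>(totalGpuMemory));
}
```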
@@ -773,6 +773,9 @@ class AllocTracker {
   /// Return true if the tracker's map contains `ptr`.
   bool contains(uintptr_t ptr) const;

+  /// Report total CPU and GPU memory allocated by runtime client.
+  std::pair<int64_t, int64_t> reportAllocatedMemory() const;
Review comment: There actually could be more types than just these two, so I'd prefer if we separate it into a struct or array. The array could be indexed by all the potential values of …
+
 private:
   llvm::DenseMap<uintptr_t, PointerInfo> map;
 };
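To make that suggestion concrete: instead of a CPU/GPU pair, the tracker could keep a running total per pointer type and let callers aggregate however they like. The enum the reviewer refers to is presumably `PointerType`, given the implementation below. This is only a rough sketch with illustrative names, not the project's actual design:

```cpp
// Sketch of the "struct or array indexed by pointer type" idea from the
// review. The enum here is a stand-in: the real PointerType may have more
// (or differently named) enumerators, which is exactly the reviewer's point.
#include <array>
#include <cstddef>
#include <cstdint>

enum class PointerType { host, pinned_host, device, unified, kNumValues };

struct AllocatedMemoryReport {
  // One running byte total per PointerType value.
  std::array<std::int64_t, static_cast<std::size_t>(PointerType::kNumValues)>
      bytesPerType{};

  std::int64_t &operator[](PointerType type) {
    return bytesPerType[static_cast<std::size_t>(type)];
  }
  std::int64_t operator[](PointerType type) const {
    return bytesPerType[static_cast<std::size_t>(type)];
  }
};
```

With something like this, the accumulation loop in the implementation below would reduce to `report[info.type] += info.size;`, and no information is lost if new pointer types are added later.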
@@ -429,6 +429,24 @@ PointerInfo AllocTracker::lookupOrDefault(uintptr_t ptr) const {
   return map.at(ptr);
 }

+std::pair<int64_t, int64_t> AllocTracker::reportAllocatedMemory() const {
+  int64_t totalCpuMemory = 0;
Review comment: We should use …
+  int64_t totalGpuMemory = 0;
+
+  for (const auto &entry : map) {
+    const PointerInfo &info = entry.second;
+    if (info.isExternallyManaged())
Review comment: @christopherbate Is this sufficient for tracking only internally managed/allocated pointers?
+      continue;
+    if (info.type == PointerType::host || info.type == PointerType::pinned_host) {
+      totalCpuMemory += info.size;
+    } else if (info.type == PointerType::device || info.type == PointerType::unified) {
+      totalGpuMemory += info.size;
+    }
+  }
+
+  return {totalCpuMemory, totalGpuMemory};
+}
+
 StatusOr<PointerInfo> runtime::allocate(AllocTracker &tracker, PointerType type,
                                         uint64_t size,
                                         std::optional<uint32_t> alignment,
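From a consumer's point of view, the returned pair unpacks naturally with structured bindings. The helper below is purely illustrative; its name and the choice of dumping to `llvm::errs()` are assumptions, and how a caller obtains the `AllocTracker` is outside this diff.

```cpp
#include "llvm/Support/raw_ostream.h"

// Illustration only: print whatever the tracker currently reports.
void dumpTrackedMemory(const AllocTracker &tracker) {
  auto [cpuBytes, gpuBytes] = tracker.reportAllocatedMemory();
  llvm::errs() << "tracked CPU bytes: " << cpuBytes
               << ", tracked GPU bytes: " << gpuBytes << "\n";
}
```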
@@ -775,7 +775,18 @@ PYBIND11_MODULE(_api, m) {
             THROW_IF_MTRT_ERROR(s);
           },
           py::arg("device_memref"), py::arg("existing_host_memref"),
-          py::arg("stream") = py::none());
+          py::arg("stream") = py::none())
+      .def(
+          "report_allocated_memory",
+          [](PyRuntimeClient &self) {
+            int64_t totalGpuMemory;
+            int64_t totalCpuMemory;
+            MTRT_Status s = mtrtReportAllocatedMemory(self, &totalCpuMemory, &totalGpuMemory);
+            THROW_IF_MTRT_ERROR(s);
+            py::object namedtuple = py::module::import("collections").attr("namedtuple");
+            py::object MemoryUsage = namedtuple("MemoryUsage", "cpu_memory gpu_memory");
Review comment: You'll need to update the stubs so users can see this type information in the IDE.
+            return MemoryUsage(totalCpuMemory, totalGpuMemory);
+          });

   py::class_<PyRuntimeValue>(m, "RuntimeValue", py::module_local())
       .def_property_readonly(MTRT_PYTHON_CAPI_PTR_ATTR,
Review comment: This isn't quite accurate. You're reporting the CPU/GPU memory that is being tracked by the RuntimeClient. It can track buffers that are externally allocated.