[heap profiler] Refactor intermediate storage classes in json_exporter

Remove Backtrace and BacktraceStorage.
Replace AllocationEvent with AllocationSite & AllocationMetrics.

BUG=923459

Change-Id: I7c1fa42e866c6cd6bb21a1f7d2c7ed4bd67d0919
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1574651
Commit-Queue: Alexei Filippov <[email protected]>
Reviewed-by: Erik Chen <[email protected]>
Cr-Commit-Position: refs/heads/master@{#652995}
diff --git a/components/services/heap_profiling/BUILD.gn b/components/services/heap_profiling/BUILD.gn
index c20235a..77cfbdf 100644
--- a/components/services/heap_profiling/BUILD.gn
+++ b/components/services/heap_profiling/BUILD.gn
@@ -4,10 +4,8 @@
 
 static_library("heap_profiling") {
   sources = [
-    "allocation_event.cc",
-    "allocation_event.h",
-    "backtrace.cc",
-    "backtrace.h",
+    "allocation.cc",
+    "allocation.h",
     "connection_manager.cc",
     "connection_manager.h",
     "heap_profiling_service.cc",
@@ -26,7 +24,6 @@
 source_set("unit_tests") {
   testonly = true
   sources = [
-    "backtrace_unittest.cc",
     "json_exporter_unittest.cc",
   ]
   deps = [
diff --git a/components/services/heap_profiling/allocation.cc b/components/services/heap_profiling/allocation.cc
new file mode 100644
index 0000000..7c49ef1
--- /dev/null
+++ b/components/services/heap_profiling/allocation.cc
@@ -0,0 +1,27 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/services/heap_profiling/allocation.h"
+
+#include "base/hash/hash.h"
+
+namespace heap_profiling {
+
+namespace {
+uint32_t ComputeHash(const std::vector<Address>& addrs) {
+  return base::Hash(addrs.data(), addrs.size() * sizeof(Address));
+}
+}  // namespace
+
+AllocationSite::AllocationSite(AllocatorType allocator,
+                               std::vector<Address>&& stack,
+                               int context_id)
+    : allocator(allocator),
+      stack(std::move(stack)),
+      context_id(context_id),
+      hash_(ComputeHash(this->stack)) {}
+
+AllocationSite::~AllocationSite() = default;
+
+}  // namespace heap_profiling
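
The hash is computed once, at construction, over the raw bytes of the call stack; since Address is a plain uint64_t, hashing the vector's contiguous storage hashes every frame at once. A minimal standalone sketch of the same idea, with std::hash standing in for base::Hash:

    #include <cstdint>
    #include <functional>
    #include <string_view>
    #include <vector>

    using Address = uint64_t;

    uint32_t HashStack(const std::vector<Address>& addrs) {
      // View the vector's contiguous storage as a byte buffer, as
      // ComputeHash() above does via base::Hash().
      std::string_view bytes(reinterpret_cast<const char*>(addrs.data()),
                             addrs.size() * sizeof(Address));
      return static_cast<uint32_t>(std::hash<std::string_view>{}(bytes));
    }
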
diff --git a/components/services/heap_profiling/allocation.h b/components/services/heap_profiling/allocation.h
new file mode 100644
index 0000000..b9d51f0b
--- /dev/null
+++ b/components/services/heap_profiling/allocation.h
@@ -0,0 +1,69 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_SERVICES_HEAP_PROFILING_ALLOCATION_H_
+#define COMPONENTS_SERVICES_HEAP_PROFILING_ALLOCATION_H_
+
+#include <unordered_map>
+#include <vector>
+
+#include "components/services/heap_profiling/public/mojom/heap_profiling_client.mojom.h"
+
+namespace heap_profiling {
+
+using Address = uint64_t;
+using mojom::AllocatorType;
+
+// The struct is a descriptor of an allocation site. It is used as a unique
+// key in the AllocationMap.
+struct AllocationSite {
+  AllocationSite(AllocatorType allocator,
+                 std::vector<Address>&& stack,
+                 int context_id);
+  ~AllocationSite();
+
+  // Type of the allocator responsible for the allocation. Possible values are
+  // kMalloc, kPartitionAlloc, or kOilpan.
+  const AllocatorType allocator;
+
+  // Program call stack at the moment of allocation. Each address corresponds
+  // to a code memory location in the inspected process.
+  const std::vector<Address> stack;
+
+  // Each allocation call may be associated with a context string.
+  // This field contains the id of the context string. The string itself is
+  // stored in the |context_map| field of the ExportParams class.
+  const int context_id;
+
+  struct Hash {
+    size_t operator()(const AllocationSite& alloc) const { return alloc.hash_; }
+  };
+
+ private:
+  const uint32_t hash_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationSite);
+};
+
+inline bool operator==(const AllocationSite& a, const AllocationSite& b) {
+  return a.allocator == b.allocator && a.stack == b.stack &&
+         a.context_id == b.context_id;
+}
+
+// Data associated with an allocation site in the AllocationMap.
+struct AllocationMetrics {
+  // Total size of the allocations responsible for producing the sample.
+  size_t size = 0;
+
+  // Number of allocations associated with the sample.
+  size_t count = 0;
+};
+
+using AllocationMap =
+    std::unordered_map<AllocationSite, AllocationMetrics, AllocationSite::Hash>;
+
+}  // namespace heap_profiling
+
+#endif  // COMPONENTS_SERVICES_HEAP_PROFILING_ALLOCATION_H_
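
Since AllocationSite is non-copyable, callers build the key in place with std::piecewise_construct rather than constructing a temporary AllocationSite and moving it into the map. A condensed sketch of the insertion pattern (mirroring the InsertAllocation helper in the updated unit test; the function name here is illustrative):

    void AddSample(AllocationMap* allocs, AllocatorType type, size_t size,
                   std::vector<Address> stack, int context_id) {
      AllocationMetrics& metrics =
          allocs
              ->emplace(std::piecewise_construct,
                        std::forward_as_tuple(type, std::move(stack), context_id),
                        std::forward_as_tuple())
              .first->second;
      // Samples from the same site fold into a single entry.
      metrics.size += size;
      metrics.count++;
    }
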
diff --git a/components/services/heap_profiling/allocation_event.cc b/components/services/heap_profiling/allocation_event.cc
deleted file mode 100644
index e56449178..0000000
--- a/components/services/heap_profiling/allocation_event.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "components/services/heap_profiling/allocation_event.h"
-
-namespace heap_profiling {
-
-AllocationEvent::AllocationEvent(AllocatorType allocator,
-                                 Address addr,
-                                 size_t sz,
-                                 const Backtrace* bt,
-                                 int context_id)
-    : allocator_(allocator),
-      address_(addr),
-      size_(sz),
-      backtrace_(bt),
-      context_id_(context_id) {}
-
-AllocationEvent::AllocationEvent(Address addr) : address_(addr) {}
-
-AllocationCountMap AllocationEventSetToCountMap(const AllocationEventSet& set) {
-  AllocationCountMap map;
-  for (const auto& alloc : set)
-    map[alloc]++;
-  return map;
-}
-
-}  // namespace heap_profiling
diff --git a/components/services/heap_profiling/allocation_event.h b/components/services/heap_profiling/allocation_event.h
deleted file mode 100644
index 72ce920..0000000
--- a/components/services/heap_profiling/allocation_event.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef COMPONENTS_SERVICES_HEAP_PROFILING_ALLOCATION_EVENT_H_
-#define COMPONENTS_SERVICES_HEAP_PROFILING_ALLOCATION_EVENT_H_
-
-#include <functional>
-#include <map>
-#include <unordered_set>
-
-#include "components/services/heap_profiling/backtrace.h"
-#include "components/services/heap_profiling/public/mojom/heap_profiling_client.mojom.h"
-
-namespace heap_profiling {
-
-using mojom::AllocatorType;
-
-// This class is copyable and assignable.
-//
-// AllocationEvents can be uniquely identified by their address. Caveat: This is
-// true at any given point in time, since each address can only be used in a
-// single allocation. However, it's possible that comparing different points in
-// time, there are different AllocationEvents with the same address.
-class AllocationEvent {
- public:
-  // There must be a reference to this kept in the BacktraceStorage object on
-  // behalf of this class.
-  AllocationEvent(AllocatorType allocator,
-                  Address addr,
-                  size_t sz,
-                  const Backtrace* bt,
-                  int context_id);
-
-  // This partial initializer creates an allocation of empty size for
-  // searching purposes.
-  explicit AllocationEvent(Address addr);
-
-  AllocatorType allocator() const { return allocator_; }
-
-  Address address() const { return address_; }
-  size_t size() const { return size_; }
-
-  // The backtrace for this allocation. There must be a reference to this kept
-  // in the BacktraceStorage object on behalf of this class.
-  const Backtrace* backtrace() const { return backtrace_; }
-
-  // ID into context map, 0 means no context.
-  int context_id() const { return context_id_; }
-
-  struct HashByAddress {
-    size_t operator()(const AllocationEvent& event) const {
-      std::hash<Address> hasher;
-      return hasher(event.address());
-    }
-  };
-
-  struct EqualityByAddress {
-    bool operator()(const AllocationEvent& lhs,
-                    const AllocationEvent& rhs) const {
-      return lhs.address() == rhs.address();
-    }
-  };
-
-  // Implements < for AllocationEvent using everything but the address.
-  struct MetadataPartialLess {
-    bool operator()(const AllocationEvent& lhs,
-                    const AllocationEvent& rhs) const {
-      // Note that we're using pointer compiarisons on the backtrace objects
-      // since they're atoms and the actual ordering is not important.
-      return std::tie(lhs.size_, lhs.backtrace_, lhs.context_id_,
-                      lhs.allocator_) < std::tie(rhs.size_, rhs.backtrace_,
-                                                 rhs.context_id_,
-                                                 rhs.allocator_);
-    }
-  };
-
- private:
-  AllocatorType allocator_ = AllocatorType::kMalloc;
-  Address address_;
-  size_t size_ = 0;
-  const Backtrace* backtrace_ = nullptr;
-  int context_id_ = 0;
-};
-
-// Unique set based on addresses of allocations.
-using AllocationEventSet =
-    std::unordered_set<AllocationEvent,
-                       AllocationEvent::HashByAddress,
-                       AllocationEvent::EqualityByAddress>;
-
-// Maps allocation metadata to allocation counts of that type. In this case,
-// the address of the AllocationEvent is unused.
-using AllocationCountMap =
-    std::map<AllocationEvent, int, AllocationEvent::MetadataPartialLess>;
-
-// Aggregates the allocation events to a count map. The address of the
-// allocation event in the returned map will be the address of the first item
-// in the set with that metadata.
-AllocationCountMap AllocationEventSetToCountMap(const AllocationEventSet& set);
-
-}  // namespace heap_profiling
-
-#endif  // COMPONENTS_SERVICES_HEAP_PROFILING_ALLOCATION_EVENT_H_
diff --git a/components/services/heap_profiling/backtrace.cc b/components/services/heap_profiling/backtrace.cc
deleted file mode 100644
index 12aba33d..0000000
--- a/components/services/heap_profiling/backtrace.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "components/services/heap_profiling/backtrace.h"
-
-#include <cstring>
-#include <utility>
-
-#include "base/hash/hash.h"
-
-namespace heap_profiling {
-
-namespace {
-size_t ComputeHash(const std::vector<Address>& addrs) {
-  if (addrs.empty())
-    return 0;
-  static_assert(std::is_integral<Address>::value,
-                "base::Hash call below needs simple type.");
-  return base::Hash(addrs.data(), addrs.size() * sizeof(Address));
-}
-}  // namespace
-
-Backtrace::Backtrace(std::vector<Address>&& a)
-    : addrs_(std::move(a)), hash_(ComputeHash(addrs_)) {}
-
-Backtrace::Backtrace(Backtrace&& other) noexcept = default;
-
-Backtrace::~Backtrace() = default;
-
-Backtrace& Backtrace::operator=(Backtrace&& other) = default;
-
-bool Backtrace::operator==(const Backtrace& other) const {
-  if (addrs_.size() != other.addrs_.size())
-    return false;
-  return memcmp(addrs_.data(), other.addrs_.data(),
-                addrs_.size() * sizeof(Address)) == 0;
-}
-
-bool Backtrace::operator!=(const Backtrace& other) const {
-  return !operator==(other);
-}
-
-}  // namespace heap_profiling
diff --git a/components/services/heap_profiling/backtrace.h b/components/services/heap_profiling/backtrace.h
deleted file mode 100644
index 0e3c869..0000000
--- a/components/services/heap_profiling/backtrace.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_H_
-#define COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_H_
-
-#include <functional>
-#include <unordered_set>
-#include <vector>
-
-#include "base/macros.h"
-
-namespace heap_profiling {
-
-using Address = uint64_t;
-
-// Holds a move-only stack backtrace and a precomputed hash. This backtrace
-// uses addresses in the instrumented process. This is in contrast to
-// base::StackTrace which is for getting and working with stack traces in the
-// current process.
-class Backtrace {
- public:
-  explicit Backtrace(std::vector<Address>&& a);
-  Backtrace(Backtrace&& other) noexcept;
-  ~Backtrace();
-
-  Backtrace& operator=(Backtrace&& other);
-
-  bool operator==(const Backtrace& other) const;
-  bool operator!=(const Backtrace& other) const;
-
-  const std::vector<Address>& addrs() const { return addrs_; }
-  size_t hash() const { return hash_; }
-
- private:
-  std::vector<Address> addrs_;
-  size_t hash_;
-
-  DISALLOW_COPY_AND_ASSIGN(Backtrace);
-};
-
-using BacktraceStorage = std::unordered_set<Backtrace>;
-
-}  // namespace heap_profiling
-
-namespace std {
-
-template <>
-struct hash<heap_profiling::Backtrace> {
-  using argument_type = heap_profiling::Backtrace;
-  using result_type = size_t;
-  result_type operator()(const argument_type& s) const { return s.hash(); }
-};
-
-}  // namespace std
-
-#endif  // COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_H_
diff --git a/components/services/heap_profiling/backtrace_unittest.cc b/components/services/heap_profiling/backtrace_unittest.cc
deleted file mode 100644
index b812c31..0000000
--- a/components/services/heap_profiling/backtrace_unittest.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "components/services/heap_profiling/backtrace.h"
-
-#include <vector>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace heap_profiling {
-
-TEST(BacktraceStorage, KeyStability) {
-  BacktraceStorage storage;
-
-  // Make a lot of unique backtraces to force reallocation of the hash table
-  // several times.
-  const size_t num_traces = 1000;
-  std::vector<const Backtrace*> traces;
-  for (size_t i = 0; i < num_traces; i++) {
-    // Each backtrace should contain its index as the only stack entry.
-    std::vector<Address> addrs;
-    addrs.push_back(Address(i));
-    traces.push_back(&*storage.insert(Backtrace(std::move(addrs))).first);
-  }
-
-  // Validate the backtraces are still valid.
-  for (size_t i = 0; i < num_traces; i++) {
-    ASSERT_EQ(1u, traces[i]->addrs().size());
-    EXPECT_EQ(Address(i), traces[i]->addrs()[0]);
-  }
-}
-
-}  // namespace heap_profiling
diff --git a/components/services/heap_profiling/connection_manager.cc b/components/services/heap_profiling/connection_manager.cc
index 87aef9c..02a9294f 100644
--- a/components/services/heap_profiling/connection_manager.cc
+++ b/components/services/heap_profiling/connection_manager.cc
@@ -13,11 +13,6 @@
 
 namespace heap_profiling {
 
-namespace {
-const size_t kMinSizeThreshold = 16 * 1024;
-const size_t kMinCountThreshold = 1024;
-}  // namespace
-
 // Tracking information for DumpProcessForTracing(). This struct is
 // refcounted since there will be many background thread calls (one for each
 // AllocationTracker) and the callback is only issued when each has
@@ -188,10 +183,9 @@
     bool strip_path_from_mapped_files,
     uint32_t sampling_rate,
     mojom::HeapProfilePtr profile) {
-  AllocationCountMap counts;
+  AllocationMap allocations;
   ContextMap context_map;
   AddressToStringMap string_map;
-  BacktraceStorage backtrace_storage;
 
   bool success = true;
   for (const mojom::HeapProfileSamplePtr& sample : profile->samples) {
@@ -212,12 +206,43 @@
                                 static_cast<int>(context_map.size() + 1))
                        .first->second;
     }
+
+    size_t alloc_size = sample->size;
+    size_t alloc_count = 1;
+
+    // If allocations were sampled, then we need to desample to return accurate
+    // results.
+    // TODO(alph): Move it closer to the sampler, so other components
+    // wouldn't care about the math.
+    if (alloc_size < sampling_rate && alloc_size != 0) {
+      // To desample, we need to know the probability P that an allocation will
+      // be sampled. Once we know P, we still have to deal with discretization.
+      // Let's say that there's 1 allocation with P=0.85. Should we report 1 or
+      // 2 allocations? Should we report a fudged size (size / 0.85), or a
+      // discretized size, e.g. (1 * size) or (2 * size)? There are tradeoffs.
+      //
+      // We choose to emit a fudged size, which will return a more accurate
+      // total allocation size, but less accurate per-allocation size.
+      //
+      // The aggregate probability that an allocation will be sampled is
+      // alloc_size / sampling_rate. For a more detailed treatise, see
+      // https://bugs.chromium.org/p/chromium/issues/detail?id=810748#c4
+      float desampling_multiplier =
+          static_cast<float>(sampling_rate) / static_cast<float>(alloc_size);
+      alloc_count *= desampling_multiplier;
+      alloc_size *= desampling_multiplier;
+    }
+
     std::vector<Address> stack(sample->stack.begin(), sample->stack.end());
-    const Backtrace* backtrace =
-        &*backtrace_storage.insert(Backtrace(std::move(stack))).first;
-    AllocationEvent alloc(sample->allocator, Address(0), sample->size,
-                          backtrace, context_id);
-    ++counts[alloc];
+    AllocationMetrics& metrics =
+        allocations
+            .emplace(std::piecewise_construct,
+                     std::forward_as_tuple(sample->allocator, std::move(stack),
+                                           context_id),
+                     std::forward_as_tuple())
+            .first->second;
+    metrics.size += alloc_size;
+    metrics.count += alloc_count;
   }
 
   for (const auto& str : profile->strings) {
@@ -230,8 +255,8 @@
 
   DCHECK(success);
   DoDumpOneProcessForTracing(std::move(tracking), pid, process_type,
-                             strip_path_from_mapped_files, sampling_rate,
-                             success, std::move(counts), std::move(context_map),
+                             strip_path_from_mapped_files, success,
+                             std::move(allocations), std::move(context_map),
                              std::move(string_map));
 }
 
@@ -240,9 +265,8 @@
     base::ProcessId pid,
     mojom::ProcessType process_type,
     bool strip_path_from_mapped_files,
-    uint32_t sampling_rate,
     bool success,
-    AllocationCountMap counts,
+    AllocationMap counts,
     ContextMap context,
     AddressToStringMap mapped_strings) {
   // All code paths through here must issue the callback when waiting_responses
@@ -261,11 +285,8 @@
   params.context_map = std::move(context);
   params.mapped_strings = std::move(mapped_strings);
   params.process_type = process_type;
-  params.min_size_threshold = kMinSizeThreshold;
-  params.min_count_threshold = kMinCountThreshold;
   params.strip_path_from_mapped_files = strip_path_from_mapped_files;
   params.next_id = next_id_;
-  params.sampling_rate = sampling_rate;
 
   auto it = tracking->vm_regions.find(pid);
   if (it != tracking->vm_regions.end())
diff --git a/components/services/heap_profiling/connection_manager.h b/components/services/heap_profiling/connection_manager.h
index 36ce1de..01d65b47 100644
--- a/components/services/heap_profiling/connection_manager.h
+++ b/components/services/heap_profiling/connection_manager.h
@@ -18,7 +18,7 @@
 #include "base/threading/thread.h"
 #include "base/timer/timer.h"
 #include "build/build_config.h"
-#include "components/services/heap_profiling/allocation_event.h"
+#include "components/services/heap_profiling/allocation.h"
 #include "components/services/heap_profiling/public/mojom/heap_profiling_service.mojom.h"
 #include "services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom.h"
 
@@ -78,9 +78,8 @@
       base::ProcessId pid,
       mojom::ProcessType process_type,
       bool strip_path_from_mapped_files,
-      uint32_t sampling_rate,
       bool success,
-      AllocationCountMap counts,
+      AllocationMap counts,
       ContextMap context,
       AddressToStringMap mapped_strings);
 
diff --git a/components/services/heap_profiling/json_exporter.cc b/components/services/heap_profiling/json_exporter.cc
index 4bd5674..e806ab2 100644
--- a/components/services/heap_profiling/json_exporter.cc
+++ b/components/services/heap_profiling/json_exporter.cc
@@ -5,6 +5,7 @@
 #include "components/services/heap_profiling/json_exporter.h"
 
 #include <map>
+#include <unordered_map>
 
 #include "base/containers/adapters.h"
 #include "base/format_macros.h"
@@ -20,7 +21,10 @@
 namespace {
 
 // Maps strings to integers for the JSON string table.
-using StringTable = std::map<std::string, size_t>;
+using StringTable = std::unordered_map<std::string, size_t>;
+
+// Maps an allocation site to the node_id of its top stack frame.
+using AllocationToNodeId = std::unordered_map<const AllocationSite*, size_t>;
 
 constexpr uint32_t kAllocatorCount =
     static_cast<uint32_t>(AllocatorType::kMaxValue) + 1;
@@ -46,32 +50,6 @@
 
 using BacktraceTable = std::map<BacktraceNode, size_t>;
 
-// Used as a temporary map key to uniquify an allocation with a given context
-// and stack. No backtraces are created or destroyed during the lifetime of that
-// structure. Therefore it's safe to use a raw pointer since backtraces are
-// uniquified, this does pointer comparisons on the backtrace to give a stable
-// ordering, even if that ordering has no intrinsic meaning.
-struct UniqueAlloc {
- public:
-  UniqueAlloc(const Backtrace* bt, int ctx_id)
-      : backtrace(bt), context_id(ctx_id) {}
-
-  bool operator<(const UniqueAlloc& other) const {
-    return std::tie(backtrace, context_id) <
-           std::tie(other.backtrace, other.context_id);
-  }
-
-  const Backtrace* backtrace = nullptr;
-  const int context_id = 0;
-};
-
-struct UniqueAllocMetrics {
-  size_t size = 0;
-  size_t count = 0;
-};
-
-using UniqueAllocationMap = std::map<UniqueAlloc, UniqueAllocMetrics>;
-
 // The hardcoded ID for having no context for an allocation.
 constexpr int kUnknownTypeId = 0;
 
@@ -92,11 +70,17 @@
 // Writes the top-level allocators section. This section is used by the tracing
 // UI to show a small summary for each allocator. It's necessary as a
 // placeholder to allow the stack-viewing UI to be shown.
-//
-// Each array should be the number of allocators long.
-void WriteAllocatorsSummary(size_t total_size[],
-                            size_t total_count[],
+void WriteAllocatorsSummary(const AllocationMap& allocations,
                             std::ostream& out) {
+  // Aggregate stats for each allocator type.
+  size_t total_size[kAllocatorCount] = {0};
+  size_t total_count[kAllocatorCount] = {0};
+  for (const auto& alloc_pair : allocations) {
+    uint32_t index = static_cast<uint32_t>(alloc_pair.first.allocator);
+    total_size[index] += alloc_pair.second.size;
+    total_count[index] += alloc_pair.second.count;
+  }
+
   out << "\"allocators\":{\n";
   for (uint32_t i = 0; i < kAllocatorCount; i++) {
     const char* alloc_type = StringForAllocatorType(i);
@@ -169,56 +153,44 @@
 size_t AddOrGetString(const std::string& str,
                       StringTable* string_table,
                       ExportParams* params) {
-  auto result = string_table->emplace(str, params->next_id++);
-  // "result.first" is an iterator into the map.
-  return result.first->second;
+  return string_table->emplace(str, params->next_id++).first->second;
 }
 
-// Processes the context information needed for the give set of allocations.
+// Processes the context information.
 // Strings are added for each referenced context and a mapping between
 // context IDs and string IDs is filled in for each.
-void FillContextStrings(const UniqueAllocationMap& allocations,
-                        ExportParams* params,
+void FillContextStrings(ExportParams* params,
                         StringTable* string_table,
-                        std::map<int, size_t>* context_to_string_map) {
-  std::set<int> used_context;
-  for (const auto& alloc : allocations)
-    used_context.insert(alloc.first.context_id);
-
-  if (used_context.find(kUnknownTypeId) != used_context.end()) {
-    // Hard code a string for the unknown context type.
-    context_to_string_map->emplace(
-        kUnknownTypeId, AddOrGetString("[unknown]", string_table, params));
-  }
-
+                        std::map<int, size_t>* context_to_string_id_map) {
   // The context map is backwards from what we need, so iterate through the
   // whole thing and see which ones are used.
   for (const auto& context : params->context_map) {
-    if (used_context.find(context.second) != used_context.end()) {
-      size_t string_id = AddOrGetString(context.first, string_table, params);
-      context_to_string_map->emplace(context.second, string_id);
-    }
+    size_t string_id = AddOrGetString(context.first, string_table, params);
+    context_to_string_id_map->emplace(context.second, string_id);
   }
+
+  // Hard code a string for the unknown context type.
+  context_to_string_id_map->emplace(
+      kUnknownTypeId, AddOrGetString("[unknown]", string_table, params));
 }
 
 size_t AddOrGetBacktraceNode(BacktraceNode node,
                              BacktraceTable* backtrace_table,
                              ExportParams* params) {
-  auto result = backtrace_table->emplace(std::move(node), params->next_id++);
-  // "result.first" is an iterator into the map.
-  return result.first->second;
+  return backtrace_table->emplace(std::move(node), params->next_id++)
+      .first->second;
 }
 
 // Returns the index into nodes of the node to reference for this stack. That
 // node will reference its parent node, etc. to allow the full stack to
 // be represented.
-size_t AppendBacktraceStrings(const Backtrace& backtrace,
+size_t AppendBacktraceStrings(const AllocationSite& alloc,
                               BacktraceTable* backtrace_table,
                               StringTable* string_table,
                               ExportParams* params) {
-  int parent = -1;
+  size_t parent = BacktraceNode::kNoParent;
   // Addresses must be outputted in reverse order.
-  for (const Address addr : base::Reversed(backtrace.addrs())) {
+  for (const Address addr : base::Reversed(alloc.stack)) {
     size_t sid;
     auto it = params->mapped_strings.find(addr);
     if (it != params->mapped_strings.end()) {
@@ -310,10 +282,14 @@
 
 // Writes the number of matching allocations array which looks like:
 //   "counts":[1, 1, 2]
-void WriteCounts(const UniqueAllocationMap& allocations, std::ostream& out) {
+void WriteCounts(const AllocationMap& allocations,
+                 int allocator,
+                 std::ostream& out) {
   out << "\"counts\":[";
   bool first_time = true;
   for (const auto& cur : allocations) {
+    if (static_cast<int>(cur.first.allocator) != allocator)
+      continue;
     if (!first_time)
       out << ",";
     else
@@ -325,10 +301,14 @@
 
 // Writes the total sizes of each allocation which looks like:
 //   "sizes":[32, 64, 12]
-void WriteSizes(const UniqueAllocationMap& allocations, std::ostream& out) {
+void WriteSizes(const AllocationMap& allocations,
+                int allocator,
+                std::ostream& out) {
   out << "\"sizes\":[";
   bool first_time = true;
   for (const auto& cur : allocations) {
+    if (static_cast<int>(cur.first.allocator) != allocator)
+      continue;
     if (!first_time)
       out << ",";
     else
@@ -340,10 +320,14 @@
 
 // Writes the types array of integers which looks like:
 //   "types":[0, 0, 1]
-void WriteTypes(const UniqueAllocationMap& allocations, std::ostream& out) {
+void WriteTypes(const AllocationMap& allocations,
+                int allocator,
+                std::ostream& out) {
   out << "\"types\":[";
   bool first_time = true;
   for (const auto& cur : allocations) {
+    if (static_cast<int>(cur.first.allocator) != allocator)
+      continue;
     if (!first_time)
       out << ",";
     else
@@ -356,18 +340,20 @@
 // Writes the nodes array which indexes for each allocation into the maps nodes
 // array written above. It looks like:
 //   "nodes":[1, 5, 10]
-void WriteAllocatorNodes(const UniqueAllocationMap& allocations,
-                         const std::map<const Backtrace*, size_t>& backtraces,
+void WriteAllocatorNodes(const AllocationMap& allocations,
+                         int allocator,
+                         const AllocationToNodeId& alloc_to_node_id,
                          std::ostream& out) {
   out << "\"nodes\":[";
   bool first_time = true;
   for (const auto& cur : allocations) {
+    if (static_cast<int>(cur.first.allocator) != allocator)
+      continue;
     if (!first_time)
       out << ",";
     else
       first_time = false;
-    auto found = backtraces.find(cur.first.backtrace);
-    out << found->second;
+    out << alloc_to_node_id.at(&cur.first);
   }
   out << "]";
 }
@@ -389,98 +375,27 @@
   out << R"("level_of_detail": "detailed")"
       << ",\n";
 
-  // Aggregate stats for each allocator type.
-  size_t total_size[kAllocatorCount] = {0};
-  size_t total_count[kAllocatorCount] = {0};
-  UniqueAllocationMap filtered_allocations[kAllocatorCount];
-  for (const auto& alloc_pair : params->allocs) {
-    uint32_t allocator_index =
-        static_cast<uint32_t>(alloc_pair.first.allocator());
-    size_t alloc_count = alloc_pair.second;
-    size_t alloc_size = alloc_pair.first.size();
-    size_t alloc_total_size = alloc_size * alloc_count;
-
-    // If allocations were sampled, then we need to desample to return accurate
-    // results.
-    if (alloc_size < params->sampling_rate && alloc_size != 0) {
-      // To desample, we need to know the probability P that an allocation will
-      // be sampled. Once we know P, we still have to deal with discretization.
-      // Let's say that there's 1 allocation with P=0.85. Should we report 1 or
-      // 2 allocations? Should we report a fudged size (size / 0.85), or a
-      // discreted size, e.g. (1 * size) or (2 * size)? There are tradeoffs.
-      //
-      // We choose to emit a fudged size, which will return a more accurate
-      // total allocation size, but less accurate per-allocation size.
-      //
-      // The aggregate probability that an allocation will be sampled is
-      // alloc_size / sampling_rate. For a more detailed treatise, see
-      // https://bugs.chromium.org/p/chromium/issues/detail?id=810748#c4
-      float desampling_multiplier = static_cast<float>(params->sampling_rate) /
-                                    static_cast<float>(alloc_size);
-      alloc_count *= desampling_multiplier;
-      alloc_total_size *= desampling_multiplier;
-    }
-
-    total_size[allocator_index] += alloc_total_size;
-    total_count[allocator_index] += alloc_count;
-
-    UniqueAlloc unique_alloc(alloc_pair.first.backtrace(),
-                             alloc_pair.first.context_id());
-    UniqueAllocMetrics& unique_alloc_metrics =
-        filtered_allocations[allocator_index][unique_alloc];
-    unique_alloc_metrics.size += alloc_total_size;
-    unique_alloc_metrics.count += alloc_count;
-  }
-
-  // Filter irrelevant allocations.
-  // TODO(crbug:763595): Leave placeholders for pruned allocations.
-  for (uint32_t i = 0; i < kAllocatorCount; i++) {
-    for (auto alloc = filtered_allocations[i].begin();
-         alloc != filtered_allocations[i].end();) {
-      if (alloc->second.size < params->min_size_threshold &&
-          alloc->second.count < params->min_count_threshold) {
-        alloc = filtered_allocations[i].erase(alloc);
-      } else {
-        ++alloc;
-      }
-    }
-  }
-
-  WriteAllocatorsSummary(total_size, total_count, out);
+  WriteAllocatorsSummary(params->allocs, out);
   WriteHeapsV2Header(out);
 
   // Output Heaps_V2 format version. Currently "1" is the only valid value.
   out << "\"version\": 1,\n";
 
-  StringTable string_table;
-
   // Put all required context strings in the string table and generate a
   // mapping from allocation context_id to string ID.
-  std::map<int, size_t> context_to_string_map;
-  for (uint32_t i = 0; i < kAllocatorCount; i++) {
-    FillContextStrings(filtered_allocations[i], params, &string_table,
-                       &context_to_string_map);
-  }
+  StringTable string_table;
+  std::map<int, size_t> context_to_string_id_map;
+  FillContextStrings(params, &string_table, &context_to_string_id_map);
 
-  // Find all backtraces referenced by the set and not filtered. The backtrace
-  // storage will contain more stacks than we want to write out (it will refer
-  // to all processes, while we're only writing one). So do those only on
-  // demand.
-  //
-  // The map maps backtrace keys to node IDs (computed below).
-  std::map<const Backtrace*, size_t> backtraces;
-  for (size_t i = 0; i < kAllocatorCount; i++) {
-    for (const auto& alloc : filtered_allocations[i])
-      backtraces.emplace(alloc.first.backtrace, 0);
-  }
-
-  // Write each backtrace, converting the string for the stack entry to string
-  // IDs. The backtrace -> node ID will be filled in at this time.
+  AllocationToNodeId alloc_to_node_id;
   BacktraceTable nodes;
-  VLOG(1) << "Number of backtraces " << backtraces.size();
-  for (auto& bt : backtraces)
-    bt.second =
-        AppendBacktraceStrings(*bt.first, &nodes, &string_table, params);
+  // For each backtrace, convert the strings for the stack entries to string
+  // IDs. The backtrace -> node ID mapping is filled in at this time.
+  for (const auto& alloc : params->allocs) {
+    size_t node_id =
+        AppendBacktraceStrings(alloc.first, &nodes, &string_table, params);
+    alloc_to_node_id.emplace(&alloc.first, node_id);
+  }
 
   // Maps section.
   out << "\"maps\": {\n";
@@ -488,7 +403,7 @@
   out << ",\n";
   WriteMapNodes(nodes, out);
   out << ",\n";
-  WriteTypeNodes(context_to_string_map, out);
+  WriteTypeNodes(context_to_string_id_map, out);
   out << "},\n";  // End of maps section.
 
   // Allocators section.
@@ -496,13 +411,13 @@
   for (uint32_t i = 0; i < kAllocatorCount; i++) {
     out << "  \"" << StringForAllocatorType(i) << "\":{\n    ";
 
-    WriteCounts(filtered_allocations[i], out);
+    WriteCounts(params->allocs, i, out);
     out << ",\n    ";
-    WriteSizes(filtered_allocations[i], out);
+    WriteSizes(params->allocs, i, out);
     out << ",\n    ";
-    WriteTypes(filtered_allocations[i], out);
+    WriteTypes(params->allocs, i, out);
     out << ",\n    ";
-    WriteAllocatorNodes(filtered_allocations[i], backtraces, out);
+    WriteAllocatorNodes(params->allocs, i, alloc_to_node_id, out);
     out << "\n  }";
 
     // Comma every time but the last.
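
The per-allocator filtered_allocations copies are gone: each Write* helper now takes the whole AllocationMap plus an allocator index and skips non-matching entries, trading one extra pass per allocator for not materializing per-allocator maps. Keying AllocationToNodeId by const AllocationSite* is safe because std::unordered_map never moves its elements on rehash, so pointers to keys stay valid. A minimal sketch of the shared filtering loop (the helper name and callback are illustrative, not part of the patch):

    template <typename Emit>
    void ForEachOfAllocator(const AllocationMap& allocations, int allocator,
                            Emit emit) {
      for (const auto& entry : allocations) {
        // Entries for other allocators are skipped, matching the "continue"
        // checks in WriteCounts/WriteSizes/WriteTypes above.
        if (static_cast<int>(entry.first.allocator) != allocator)
          continue;
        emit(entry.first, entry.second);
      }
    }
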
diff --git a/components/services/heap_profiling/json_exporter.h b/components/services/heap_profiling/json_exporter.h
index 23473786..300d7e0 100644
--- a/components/services/heap_profiling/json_exporter.h
+++ b/components/services/heap_profiling/json_exporter.h
@@ -8,7 +8,7 @@
 #include <iosfwd>
 #include <vector>
 
-#include "components/services/heap_profiling/allocation_event.h"
+#include "components/services/heap_profiling/allocation.h"
 #include "components/services/heap_profiling/public/mojom/heap_profiling_service.mojom.h"
 #include "services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom.h"
 
@@ -21,7 +21,7 @@
   ~ExportParams();
 
   // Allocation events to export.
-  AllocationCountMap allocs;
+  AllocationMap allocs;
 
   // VM map of all regions in the process.
   std::vector<memory_instrumentation::mojom::VmRegionPtr> maps;
@@ -37,10 +37,6 @@
   // The type of browser [browser, renderer, gpu] that is being heap-dumped.
   mojom::ProcessType process_type = mojom::ProcessType::OTHER;
 
-  // Only allocations exceeding this size or count will be exported.
-  size_t min_size_threshold = 0;
-  size_t min_count_threshold = 0;
-
   // Whether or not the paths should be stripped from mapped files. Doing so
   // anonymizes the trace, since the paths could potentially contain a username.
   // However, it prevents symbolization of locally built instances of Chrome.
@@ -54,12 +50,6 @@
   // parameter, and tells the caller the next unused ID.
   // See https://crbug.com/808066.
   size_t next_id = 1;
-
-  // When sampling is enabled, each allocation is recorded with probability
-  // (size / sampling_rate). The resulting exported JSON needs to be
-  // appropriately updated to reflect de-sampled values.
-  // A |sampling_rate| of 1 is equivalent to recording all allocations.
-  size_t sampling_rate = 1;
 };
 
 // Creates a JSON string representing a JSON dictionary that contains memory
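
With the threshold and sampling_rate fields removed, driving the exporter reduces to filling in |allocs| plus the optional maps. A minimal usage sketch, following the updated unit tests below:

    AllocationMap allocs;
    // ... populate via the piecewise emplace pattern shown earlier ...
    ExportParams params;
    params.allocs = std::move(allocs);
    std::ostringstream stream;
    ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
    std::string json = stream.str();
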
diff --git a/components/services/heap_profiling/json_exporter_unittest.cc b/components/services/heap_profiling/json_exporter_unittest.cc
index 84d65e2f..03195bb 100644
--- a/components/services/heap_profiling/json_exporter_unittest.cc
+++ b/components/services/heap_profiling/json_exporter_unittest.cc
@@ -13,19 +13,12 @@
 #include "base/strings/string_number_conversions.h"
 #include "base/values.h"
 #include "build/build_config.h"
-#include "components/services/heap_profiling/backtrace.h"
 #include "services/resource_coordinator/public/cpp/memory_instrumentation/os_metrics.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace heap_profiling {
-
 namespace {
 
-const size_t kNoSizeThreshold = 0;
-const size_t kNoCountThreshold = 0;
-const size_t kSizeThreshold = 1500;
-const size_t kCountThreshold = 1000;
-
 using MemoryMap = std::vector<memory_instrumentation::mojom::VmRegionPtr>;
 
 static constexpr int kNoParent = -1;
@@ -128,41 +121,38 @@
   return false;
 }
 
-const Backtrace* InsertBacktrace(BacktraceStorage& storage,
-                                 std::vector<Address> addrs) {
-  return &*storage.insert(Backtrace(std::move(addrs))).first;
+void InsertAllocation(AllocationMap* allocs,
+                      AllocatorType type,
+                      size_t size,
+                      std::vector<Address> stack,
+                      int context_id) {
+  AllocationMetrics& metrics =
+      allocs
+          ->emplace(std::piecewise_construct,
+                    std::forward_as_tuple(type, std::move(stack), context_id),
+                    std::forward_as_tuple())
+          .first->second;
+  metrics.size += size;
+  metrics.count++;
 }
 
 }  // namespace
 
 TEST(ProfilingJsonExporterTest, Simple) {
-  BacktraceStorage backtrace_storage;
-
   std::vector<Address> stack1{Address(0x5678), Address(0x1234)};
-  const Backtrace* bt1 = InsertBacktrace(backtrace_storage, std::move(stack1));
-
   std::vector<Address> stack2{Address(0x9013), Address(0x9012),
                               Address(0x1234)};
-  const Backtrace* bt2 = InsertBacktrace(backtrace_storage, std::move(stack2));
-
-  AllocationEventSet events;
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x1), 20, bt1, 0));
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x2), 32, bt2, 0));
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x3), 20, bt1, 0));
-  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x4),
-                                20, bt1, 0));
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x5), 12, bt2, 0));
+  AllocationMap allocs;
+  InsertAllocation(&allocs, AllocatorType::kMalloc, 20, stack1, 0);
+  InsertAllocation(&allocs, AllocatorType::kMalloc, 32, stack2, 0);
+  InsertAllocation(&allocs, AllocatorType::kMalloc, 20, stack1, 0);
+  InsertAllocation(&allocs, AllocatorType::kPartitionAlloc, 20, stack1, 0);
+  InsertAllocation(&allocs, AllocatorType::kMalloc, 12, stack2, 0);
 
   std::ostringstream stream;
 
   ExportParams params;
-  params.allocs = AllocationEventSetToCountMap(events);
-  params.min_size_threshold = kNoSizeThreshold;
-  params.min_count_threshold = kNoCountThreshold;
+  params.allocs = std::move(allocs);
   ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
   std::string json = stream.str();
 
@@ -295,169 +285,17 @@
   EXPECT_EQ(1u, sizes->GetList().size());
 }
 
-TEST(ProfilingJsonExporterTest, Sampling) {
-  size_t allocation_size = 20;
-  int sampling_rate = 1000;
-  int expected_count = static_cast<int>(sampling_rate / allocation_size);
-
-  BacktraceStorage backtrace_storage;
-
-  std::vector<Address> stack1{Address(0x5678)};
-  const Backtrace* bt1 = InsertBacktrace(backtrace_storage, std::move(stack1));
-
-  AllocationEventSet events;
-  events.insert(AllocationEvent(AllocatorType::kMalloc, Address(0x1),
-                                allocation_size, bt1, 0));
-
-  std::ostringstream stream;
-
-  ExportParams params;
-  params.allocs = AllocationEventSetToCountMap(events);
-  params.min_size_threshold = kNoSizeThreshold;
-  params.min_count_threshold = kNoCountThreshold;
-  params.sampling_rate = sampling_rate;
-  ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
-  std::string json = stream.str();
-
-  // JSON should parse.
-  base::JSONReader reader(base::JSON_PARSE_RFC);
-  std::unique_ptr<base::Value> root =
-      reader.ReadToValueDeprecated(stream.str());
-  ASSERT_EQ(base::JSONReader::JSON_NO_ERROR, reader.error_code())
-      << reader.GetErrorMessage();
-  ASSERT_TRUE(root);
-
-  // Validate the allocators summary.
-  const base::Value* malloc_summary = root->FindPath({"allocators", "malloc"});
-  ASSERT_TRUE(malloc_summary);
-  const base::Value* malloc_size =
-      malloc_summary->FindPath({"attrs", "size", "value"});
-  ASSERT_TRUE(malloc_size);
-  EXPECT_EQ("3e8", malloc_size->GetString());
-
-  const base::Value* heaps_v2 = root->FindKey("heaps_v2");
-  ASSERT_TRUE(heaps_v2);
-
-  // Retrieve the allocations and validate their structure.
-  const base::Value* sizes =
-      heaps_v2->FindPath({"allocators", "malloc", "sizes"});
-
-  ASSERT_TRUE(sizes);
-  EXPECT_EQ(1u, sizes->GetList().size());
-  EXPECT_EQ(sampling_rate, sizes->GetList()[0].GetInt());
-
-  const base::Value* counts =
-      heaps_v2->FindPath({"allocators", "malloc", "counts"});
-  ASSERT_TRUE(counts);
-  EXPECT_EQ(1u, counts->GetList().size());
-  EXPECT_EQ(expected_count, counts->GetList()[0].GetInt());
-}
-
-TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
-  BacktraceStorage backtrace_storage;
-
-  std::vector<Address> stack1{Address(0x1234)};
-  const Backtrace* bt1 = InsertBacktrace(backtrace_storage, std::move(stack1));
-
-  std::vector<Address> stack2{Address(0x5678)};
-  const Backtrace* bt2 = InsertBacktrace(backtrace_storage, std::move(stack2));
-
-  std::vector<Address> stack3{Address(0x9999)};
-  const Backtrace* bt3 = InsertBacktrace(backtrace_storage, std::move(stack3));
-
-  AllocationEventSet events;
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x1), 16, bt1, 0));
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x2), 32, bt1, 0));
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x3), 1000, bt2, 0));
-  events.insert(
-      AllocationEvent(AllocatorType::kMalloc, Address(0x4), 1000, bt2, 0));
-  for (size_t i = 0; i < kCountThreshold + 1; ++i) {
-    events.insert(
-        AllocationEvent(AllocatorType::kMalloc, Address(0x5 + i), 1, bt3, 0));
-  }
-
-  // Validate filtering by size and count.
-  std::ostringstream stream;
-
-  ExportParams params;
-  params.allocs = AllocationEventSetToCountMap(events);
-  params.min_size_threshold = kSizeThreshold;
-  params.min_count_threshold = kCountThreshold;
-  ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
-  std::string json = stream.str();
-
-  // JSON should parse.
-  base::JSONReader reader(base::JSON_PARSE_RFC);
-  std::unique_ptr<base::Value> root =
-      reader.ReadToValueDeprecated(stream.str());
-  ASSERT_EQ(base::JSONReader::JSON_NO_ERROR, reader.error_code())
-      << reader.GetErrorMessage();
-  ASSERT_TRUE(root);
-
-  const base::Value* heaps_v2 = root->FindKey("heaps_v2");
-  ASSERT_TRUE(heaps_v2);
-  const base::Value* nodes = heaps_v2->FindPath({"maps", "nodes"});
-  const base::Value* strings = heaps_v2->FindPath({"maps", "strings"});
-  ASSERT_TRUE(nodes);
-  ASSERT_TRUE(strings);
-
-  // Validate the strings table.
-  EXPECT_EQ(3u, strings->GetList().size());
-  int sid_unknown = GetIdFromStringTable(strings, "[unknown]");
-  int sid_1234 = GetIdFromStringTable(strings, "pc:1234");
-  int sid_5678 = GetIdFromStringTable(strings, "pc:5678");
-  int sid_9999 = GetIdFromStringTable(strings, "pc:9999");
-  EXPECT_NE(-1, sid_unknown);
-  EXPECT_EQ(-1, sid_1234);  // Must be filtered.
-  EXPECT_NE(-1, sid_5678);
-  EXPECT_NE(-1, sid_9999);
-
-  // Validate the nodes table.
-  // Nodes should be a list with 4 items.
-  //   [0] => address: 5678  parent: none
-  //   [1] => address: 9999  parent: none
-  EXPECT_EQ(2u, nodes->GetList().size());
-  int id0 = GetNodeWithNameID(nodes, sid_5678);
-  int id1 = GetNodeWithNameID(nodes, sid_9999);
-  EXPECT_NE(-1, id0);
-  EXPECT_NE(-1, id1);
-  EXPECT_TRUE(IsBacktraceInList(nodes, id0, kNoParent));
-  EXPECT_TRUE(IsBacktraceInList(nodes, id1, kNoParent));
-
-  // Counts should be a list with one item. Items with |bt1| are filtered.
-  // For |stack2|, there are two allocations of 1000 bytes. which is above the
-  // 1500 bytes threshold. For |stack3|, there are 1001 allocations of 1 bytes,
-  // which is above the 1000 allocations threshold.
-  const base::Value* backtraces =
-      heaps_v2->FindPath({"allocators", "malloc", "nodes"});
-  ASSERT_TRUE(backtraces);
-  EXPECT_EQ(2u, backtraces->GetList().size());
-
-  int node_bt2 = GetOffsetForBacktraceID(backtraces, id0);
-  int node_bt3 = GetOffsetForBacktraceID(backtraces, id1);
-  EXPECT_NE(-1, node_bt2);
-  EXPECT_NE(-1, node_bt3);
-}
-
 // GetProcessMemoryMaps iterates through every memory region, making allocations
 // for each one. ASAN will potentially, for each allocation, make memory
 // regions. This will cause the test to time out.
 #if !defined(ADDRESS_SANITIZER)
 TEST(ProfilingJsonExporterTest, MemoryMaps) {
-  AllocationEventSet events;
   ExportParams params;
   params.maps = memory_instrumentation::OSMetrics::GetProcessMemoryMaps(
       base::Process::Current().Pid());
   ASSERT_GT(params.maps.size(), 2u);
 
   std::ostringstream stream;
-
-  params.allocs = AllocationEventSetToCountMap(events);
-  params.min_size_threshold = kNoSizeThreshold;
-  params.min_count_threshold = kNoCountThreshold;
   ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
   std::string json = stream.str();
 
@@ -487,11 +325,9 @@
 #endif  // !defined(ADDRESS_SANITIZER)
 
 TEST(ProfilingJsonExporterTest, Context) {
-  BacktraceStorage backtrace_storage;
   ExportParams params;
 
   std::vector<Address> stack{Address(0x1234)};
-  const Backtrace* bt = InsertBacktrace(backtrace_storage, std::move(stack));
 
   std::string context_str1("Context 1");
   int context_id1 = 1;
@@ -503,21 +339,17 @@
   // Make 4 events, all with identical metadata except context. Two share the
   // same context so should get folded, one has unique context, and one has no
   // context.
-  AllocationEventSet events;
-  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x1),
-                                16, bt, context_id1));
-  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x2),
-                                16, bt, context_id2));
-  events.insert(
-      AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x3), 16, bt, 0));
-  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x4),
-                                16, bt, context_id1));
+  AllocationMap allocs;
+  InsertAllocation(&allocs, AllocatorType::kPartitionAlloc, 16, stack,
+                   context_id1);
+  InsertAllocation(&allocs, AllocatorType::kPartitionAlloc, 16, stack,
+                   context_id2);
+  InsertAllocation(&allocs, AllocatorType::kPartitionAlloc, 16, stack, 0);
+  InsertAllocation(&allocs, AllocatorType::kPartitionAlloc, 16, stack,
+                   context_id1);
+  params.allocs = std::move(allocs);
 
   std::ostringstream stream;
-
-  params.allocs = AllocationEventSetToCountMap(events);
-  params.min_size_threshold = kNoSizeThreshold;
-  params.min_count_threshold = kNoCountThreshold;
   ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
   std::string json = stream.str();