Project import
diff --git a/perfprofd/Android.mk b/perfprofd/Android.mk
new file mode 100644
index 0000000..0a07949
--- /dev/null
+++ b/perfprofd/Android.mk
@@ -0,0 +1,77 @@
+LOCAL_PATH:= $(call my-dir)
+
+perfprofd_cppflags := \
+  -Wall \
+  -Wno-sign-compare \
+  -Wno-unused-parameter \
+  -Werror \
+  -std=gnu++11 \
+
+#
+# Static library containing guts of AWP daemon.
+#
+include $(CLEAR_VARS)
+LOCAL_CLANG := true
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_MODULE := libperfprofdcore
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
+LOCAL_MODULE_TAGS := debug
+proto_header_dir := $(call local-generated-sources-dir)/proto/$(LOCAL_PATH)
+LOCAL_C_INCLUDES += $(proto_header_dir) $(LOCAL_PATH)/quipper/kernel-headers
+LOCAL_STATIC_LIBRARIES := libbase
+LOCAL_EXPORT_C_INCLUDE_DIRS += $(proto_header_dir)
+LOCAL_SRC_FILES :=  \
+	perf_profile.proto \
+	quipper/perf_utils.cc \
+	quipper/base/logging.cc \
+	quipper/address_mapper.cc \
+	quipper/perf_reader.cc \
+	quipper/perf_parser.cc \
+	perf_data_converter.cc \
+	configreader.cc \
+	cpuconfig.cc \
+	perfprofdcore.cc \
+
+LOCAL_CPPFLAGS += $(perfprofd_cppflags)
+include $(BUILD_STATIC_LIBRARY)
+
+#
+# Static library with primary utilities layer (called by perfprofd core)
+#
+include $(CLEAR_VARS)
+LOCAL_CLANG := true
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_CXX_STL := libc++
+LOCAL_MODULE := libperfprofdutils
+LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
+LOCAL_MODULE_TAGS := debug
+LOCAL_CPPFLAGS += $(perfprofd_cppflags)
+LOCAL_SRC_FILES := perfprofdutils.cc
+include $(BUILD_STATIC_LIBRARY)
+
+#
+# Main daemon
+#
+include $(CLEAR_VARS)
+LOCAL_CLANG := true
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_CXX_STL := libc++
+LOCAL_SRC_FILES := perfprofdmain.cc
+LOCAL_STATIC_LIBRARIES := libperfprofdcore libperfprofdutils
+LOCAL_SHARED_LIBRARIES := liblog libprotobuf-cpp-lite libbase
+LOCAL_SYSTEM_SHARED_LIBRARIES := libc libstdc++
+LOCAL_CPPFLAGS += $(perfprofd_cppflags)
+LOCAL_CFLAGS := -Wall -Werror -std=gnu++11
+LOCAL_MODULE := perfprofd
+LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
+LOCAL_MODULE_TAGS := debug
+LOCAL_SHARED_LIBRARIES += libcutils
+LOCAL_INIT_RC := perfprofd.rc
+include $(BUILD_EXECUTABLE)
+
+# Clean temp vars
+perfprofd_cppflags :=
+proto_header_dir :=
+
+include $(call first-makefiles-under,$(LOCAL_PATH))
diff --git a/perfprofd/NOTICE b/perfprofd/NOTICE
new file mode 100644
index 0000000..8530865
--- /dev/null
+++ b/perfprofd/NOTICE
@@ -0,0 +1,190 @@
+
+   Copyright (c) 2015, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/perfprofd/configreader.cc b/perfprofd/configreader.cc
new file mode 100644
index 0000000..e9dbdec
--- /dev/null
+++ b/perfprofd/configreader.cc
@@ -0,0 +1,287 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <assert.h>
+#include <ctype.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sstream>
+
+#include <android-base/file.h>
+
+#include "configreader.h"
+#include "perfprofdutils.h"
+
+//
+// Config file path
+//
+static const char *config_file_path =
+    "/data/data/com.google.android.gms/files/perfprofd.conf";
+
+ConfigReader::ConfigReader()
+    : trace_config_read(false)
+{
+  addDefaultEntries();
+}
+
+ConfigReader::~ConfigReader()
+{
+}
+
+const char *ConfigReader::getConfigFilePath()
+{
+  return config_file_path;
+}
+
+void ConfigReader::setConfigFilePath(const char *path)
+{
+  config_file_path = strdup(path);
+  W_ALOGI("config file path set to %s", config_file_path);
+}
+
+//
+// Populate the reader with the set of allowable entries
+//
+void ConfigReader::addDefaultEntries()
+{
+  // Average number of seconds between perf profile collections (if
+  // set to 100, then over time we want to see a perf profile
+  // collected every 100 seconds). The actual time within the interval
+  // for the collection is chosen randomly.
+  addUnsignedEntry("collection_interval", 14400, 100, UINT32_MAX);
+
+  // Use the specified fixed seed for random number generation (unit
+  // testing)
+  addUnsignedEntry("use_fixed_seed", 0, 0, UINT32_MAX);
+
+  // For testing purposes, number of times to iterate through main
+  // loop.  Value of zero indicates that we should loop forever.
+  addUnsignedEntry("main_loop_iterations", 0, 0, UINT32_MAX);
+
+  // Destination directory (where to write profiles). This location is
+  // chosen because it is accessible to the uploader service.
+  addStringEntry("destination_directory", "/data/misc/perfprofd");
+
+  // Config directory (where to read configs).
+  addStringEntry("config_directory", "/data/data/com.google.android.gms/files");
+
+  // Full path to 'perf' executable.
+  addStringEntry("perf_path", "/system/xbin/simpleperf");
+
+  // Desired sampling period (passed to perf -c option). Small
+  // sampling periods can perturb the collected profiles, so enforce
+  // min/max.
+  addUnsignedEntry("sampling_period", 500000, 5000, UINT32_MAX);
+
+  // Length of time to collect samples (number of seconds for 'perf
+  // record -a' run).
+  addUnsignedEntry("sample_duration", 3, 2, 600);
+
+  // If this parameter is non-zero it will cause perfprofd to
+  // exit immediately if the build type is not userdebug or eng.
+  // Currently defaults to 1 (true).
+  addUnsignedEntry("only_debug_build", 1, 0, 1);
+
+  // If the "mpdecision" service is running at the point we are ready
+  // to kick off a profiling run, then temporarily disable the service
+  // and hard-wire all cores on prior to the collection run, provided
+  // that the duration of the recording is less than or equal to the value of
+  // 'hardwire_cpus_max_duration'.
+  addUnsignedEntry("hardwire_cpus", 1, 0, 1);
+  addUnsignedEntry("hardwire_cpus_max_duration", 5, 1, UINT32_MAX);
+
+  // Maximum number of unprocessed profiles we can accumulate in the
+  // destination directory. Once we reach this limit, we continue
+  // to collect, but we just overwrite the most recent profile.
+  addUnsignedEntry("max_unprocessed_profiles", 10, 1, UINT32_MAX);
+
+  // If set to 1, pass the -g option when invoking 'perf' (requests
+  // stack traces as opposed to flat profile).
+  addUnsignedEntry("stack_profile", 0, 0, 1);
+
+  // For unit testing only: if set to 1, emit info messages on config
+  // file parsing.
+  addUnsignedEntry("trace_config_read", 0, 0, 1);
+
+  // Control collection of various additional profile tags
+  addUnsignedEntry("collect_cpu_utilization", 1, 0, 1);
+  addUnsignedEntry("collect_charging_state", 1, 0, 1);
+  addUnsignedEntry("collect_booting", 1, 0, 1);
+  addUnsignedEntry("collect_camera_active", 0, 0, 1);
+}
+
+void ConfigReader::addUnsignedEntry(const char *key,
+                                    unsigned default_value,
+                                    unsigned min_value,
+                                    unsigned max_value)
+{
+  std::string ks(key);
+  if (u_entries.find(ks) != u_entries.end() ||
+      s_entries.find(ks) != s_entries.end()) {
+    W_ALOGE("internal error -- duplicate entry for key %s", key);
+    exit(9);
+  }
+  values vals;
+  vals.minv = min_value;
+  vals.maxv = max_value;
+  u_info[ks] = vals;
+  u_entries[ks] = default_value;
+}
+
+void ConfigReader::addStringEntry(const char *key, const char *default_value)
+{
+  std::string ks(key);
+  if (u_entries.find(ks) != u_entries.end() ||
+      s_entries.find(ks) != s_entries.end()) {
+    W_ALOGE("internal error -- duplicate entry for key %s", key);
+    exit(9);
+  }
+  if (default_value == nullptr) {
+    W_ALOGE("internal error -- bad default value for key %s", key);
+    exit(9);
+  }
+  s_entries[ks] = std::string(default_value);
+}
+
+unsigned ConfigReader::getUnsignedValue(const char *key) const
+{
+  std::string ks(key);
+  auto it = u_entries.find(ks);
+  assert(it != u_entries.end());
+  return it->second;
+}
+
+std::string ConfigReader::getStringValue(const char *key) const
+{
+  std::string ks(key);
+  auto it = s_entries.find(ks);
+  assert(it != s_entries.end());
+  return it->second;
+}
+
+void ConfigReader::overrideUnsignedEntry(const char *key, unsigned new_value)
+{
+  std::string ks(key);
+  auto it = u_entries.find(ks);
+  assert(it != u_entries.end());
+  values vals;
+  auto iit = u_info.find(key);
+  assert(iit != u_info.end());
+  vals = iit->second;
+  assert(new_value >= vals.minv && new_value <= vals.maxv);
+  it->second = new_value;
+  W_ALOGI("option %s overridden to %u", key, new_value);
+}
+
+
+//
+// Parse a key=value pair read from the config file. This will issue
+// warnings or errors to the system logs if the line can't be
+// interpreted properly.
+//
+void ConfigReader::parseLine(const char *key,
+                             const char *value,
+                             unsigned linecount)
+{
+  assert(key);
+  assert(value);
+
+  auto uit = u_entries.find(key);
+  if (uit != u_entries.end()) {
+    unsigned uvalue = 0;
+    if (isdigit(value[0]) == 0 || sscanf(value, "%u", &uvalue) != 1) {
+      W_ALOGW("line %d: malformed unsigned value (ignored)", linecount);
+    } else {
+      values vals;
+      auto iit = u_info.find(key);
+      assert(iit != u_info.end());
+      vals = iit->second;
+      if (uvalue < vals.minv || uvalue > vals.maxv) {
+        W_ALOGW("line %d: specified value %u for '%s' "
+                "outside permitted range [%u %u] (ignored)",
+                linecount, uvalue, key, vals.minv, vals.maxv);
+      } else {
+        if (trace_config_read) {
+          W_ALOGI("option %s set to %u", key, uvalue);
+        }
+        uit->second = uvalue;
+      }
+    }
+    trace_config_read = (getUnsignedValue("trace_config_read") != 0);
+    return;
+  }
+
+  auto sit = s_entries.find(key);
+  if (sit != s_entries.end()) {
+    if (trace_config_read) {
+      W_ALOGI("option %s set to %s", key, value);
+    }
+    sit->second = std::string(value);
+    return;
+  }
+
+  W_ALOGW("line %d: unknown option '%s' ignored", linecount, key);
+}
+
+static bool isblank(const std::string &line)
+{
+  for (std::string::const_iterator it = line.begin(); it != line.end(); ++it)
+  {
+    if (isspace(*it) == 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool ConfigReader::readFile()
+{
+  std::string contents;
+  if (! android::base::ReadFileToString(config_file_path, &contents)) {
+    return false;
+  }
+
+  std::stringstream ss(contents);
+  std::string line;
+  for (unsigned linecount = 1;
+       std::getline(ss,line,'\n');
+       linecount += 1)
+  {
+
+    // comment line?
+    if (line[0] == '#') {
+      continue;
+    }
+
+    // blank line?
+    if (isblank(line.c_str())) {
+      continue;
+    }
+
+    // look for X=Y assignment
+    auto efound = line.find('=');
+    if (efound == std::string::npos) {
+      W_ALOGW("line %d: line malformed (no '=' found)", linecount);
+      continue;
+    }
+
+    std::string key(line.substr(0, efound));
+    std::string value(line.substr(efound+1, std::string::npos));
+
+    parseLine(key.c_str(), value.c_str(), linecount);
+  }
+
+  return true;
+}
diff --git a/perfprofd/configreader.h b/perfprofd/configreader.h
new file mode 100644
index 0000000..2e29601
--- /dev/null
+++ b/perfprofd/configreader.h
@@ -0,0 +1,67 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef SYSTEM_EXTRAS_PERFPROFD_CONFIGREADER_H_
+#define SYSTEM_EXTRAS_PERFPROFD_CONFIGREADER_H_
+
+#include <string>
+#include <map>
+
+//
+// This class describes the perfprofd config file syntax in terms of
+// key/value pairs. Values come in two flavors: strings or unsigned
+// integers. For unsigned values, the reader enforces an allowable
+// minimum/maximum for each setting.
+//
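+// Typical usage (sketch):
+//
+//   ConfigReader config;
+//   config.readFile();  // apply settings from the config file, if present
+//   unsigned period = config.getUnsignedValue("sampling_period");
+//   std::string dest = config.getStringValue("destination_directory");
+//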
+class ConfigReader {
+
+ public:
+  ConfigReader();
+  ~ConfigReader();
+
+  // Ask for the current setting of a config item
+  unsigned getUnsignedValue(const char *key) const;
+  std::string getStringValue(const char *key) const;
+
+  // Read the specified config file, applying any settings it contains.
+  // Returns true for a successful read, false if the config file cannot be opened.
+  bool readFile();
+
+  // set/get path to config file
+  static void setConfigFilePath(const char *path);
+  static const char *getConfigFilePath();
+
+  // override a config item (for unit testing purposes)
+  void overrideUnsignedEntry(const char *key, unsigned new_value);
+
+ private:
+  void addUnsignedEntry(const char *key,
+                        unsigned default_value,
+                        unsigned min_value,
+                        unsigned max_value);
+  void addStringEntry(const char *key, const char *default_value);
+  void addDefaultEntries();
+  void parseLine(const char *key, const char *value, unsigned linecount);
+
+  typedef struct { unsigned minv, maxv; } values;
+  std::map<std::string, values> u_info;
+  std::map<std::string, unsigned> u_entries;
+  std::map<std::string, std::string> s_entries;
+  bool trace_config_read;
+};
+
+#endif
diff --git a/perfprofd/cpuconfig.cc b/perfprofd/cpuconfig.cc
new file mode 100644
index 0000000..4b3cc36
--- /dev/null
+++ b/perfprofd/cpuconfig.cc
@@ -0,0 +1,105 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <string>
+#include <sstream>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <cutils/properties.h>
+
+#include "cpuconfig.h"
+#include "perfprofdutils.h"
+
+#define SYSFSCPU "/sys/devices/system/cpu"
+
+HardwireCpuHelper::HardwireCpuHelper(bool perform)
+    : mpdecision_stopped_(false)
+{
+  if (perform && GetMpdecisionRunning()) {
+    mpdecision_stopped_ = true;
+    StopMpdecision();
+    int ncores = GetNumCores();
+    for (int i = 0; i < ncores; ++i) {
+      OnlineCore(i, 1);
+    }
+  }
+}
+
+HardwireCpuHelper::~HardwireCpuHelper()
+{
+  if (mpdecision_stopped_) {
+    RestartMpdecision();
+  }
+}
+
+bool HardwireCpuHelper::GetMpdecisionRunning()
+{
+  char propBuf[PROPERTY_VALUE_MAX];
+  property_get("init.svc.mpdecision", propBuf, "");
+  return strcmp(propBuf, "running") == 0;
+}
+
+
+int HardwireCpuHelper::GetNumCores()
+{
+  int ncores = -1;
+  std::string possible(SYSFSCPU "/possible");
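+  // The "possible" file holds a cpu index range such as "0-7"; the core
+  // count is computed as hi - lo + 1.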
+  FILE *fp = fopen(possible.c_str(), "re");
+  if (fp) {
+    unsigned lo = 0, hi = 0;
+    if (fscanf(fp, "%u-%u", &lo, &hi) == 2) {
+      ncores = hi - lo + 1;
+    }
+    fclose(fp);
+  }
+  return ncores;
+}
+
+void HardwireCpuHelper::OnlineCore(int i, int onoff)
+{
+  std::stringstream ss;
+  ss << SYSFSCPU "/cpu" << i << "/online";
+  FILE *fp = fopen(ss.str().c_str(), "we");
+  if (fp) {
+    fprintf(fp, onoff ? "1\n" : "0\n");
+    fclose(fp);
+  } else {
+    W_ALOGW("open failed for %s", ss.str().c_str());
+  }
+}
+
+void HardwireCpuHelper::StopMpdecision()
+{
+  if (property_set("ctl.stop", "mpdecision")) {
+    W_ALOGE("setprop ctl.stop mpdecision failed");
+  }
+}
+
+void HardwireCpuHelper::RestartMpdecision()
+{
+  // Don't try to offline the cores we previously onlined -- let
+  // mpdecision figure out what to do
+
+  if (property_set("ctl.start", "mpdecision")) {
+    W_ALOGE("setprop ctl.start mpdecision failed");
+  }
+}
diff --git a/perfprofd/cpuconfig.h b/perfprofd/cpuconfig.h
new file mode 100644
index 0000000..bc5b5cf
--- /dev/null
+++ b/perfprofd/cpuconfig.h
@@ -0,0 +1,50 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef SYSTEM_EXTRAS_PERFPROFD_CPUCONFIG_H_
+#define SYSTEM_EXTRAS_PERFPROFD_CPUCONFIG_H_
+
+//
+// Helper class to perform cpu setup (if needed) prior to a profile collection.
+//
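+// Typical usage (sketch): instantiate on the stack around a profiling run,
+// e.g.
+//
+//   { HardwireCpuHelper helper(true);
+//     ... run 'perf record' ...
+//   }  // destructor restarts mpdecision if it was stopped
+//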
+class HardwireCpuHelper {
+ public:
+
+  // The constructor for this class checks to see if the 'mpdecision'
+  // service is running; if so (and if 'perform' is TRUE), then it
+  // disables the service and on-lines all of the available cores/cpus
+  // (anything listed in /sys/devices/system/cpu/possible). The
+  // destructor will re-enable the mpdecision service if it was
+  // previously disabled.
+  explicit HardwireCpuHelper(bool perform);
+  virtual ~HardwireCpuHelper();
+
+ private:
+  bool mpdecision_stopped_;
+
+  // Collect the number of available cpus/cores from /sys/devices/system/cpu/possible
+  int GetNumCores();
+
+  // Returns TRUE if the system service 'mpdecision' is running
+  bool GetMpdecisionRunning();
+
+  // Online/offline the specified cpu
+  void OnlineCore(int whichCore, int onoff);
+
+  // Enable/disable the mpdecision service via the equivalent of
+  //   setprop ctl.start mpdecision
+  //   setprop ctl.stop mpdecision
+  void StopMpdecision();
+  void RestartMpdecision();
+};
+
+#endif  // SYSTEM_EXTRAS_PERFPROFD_CPUCONFIG_H_
diff --git a/perfprofd/perf_data_converter.cc b/perfprofd/perf_data_converter.cc
new file mode 100644
index 0000000..e3d3737
--- /dev/null
+++ b/perfprofd/perf_data_converter.cc
@@ -0,0 +1,190 @@
+
+#include "perf_data_converter.h"
+#include "quipper/perf_parser.h"
+#include <map>
+
+using std::map;
+
+namespace wireless_android_logging_awp {
+
+typedef quipper::ParsedEvent::DSOAndOffset DSOAndOffset;
+typedef std::vector<DSOAndOffset> callchain;
+
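+// Strict weak ordering over callchains so that they can be used as
+// std::map keys: shorter chains order first; ties are broken
+// frame-by-frame, first on offset and then on DSO name.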
+struct callchain_lt {
+  bool operator()(const callchain *c1, const callchain *c2) const {
+    if (c1->size() != c2->size()) {
+      return c1->size() < c2->size();
+    }
+    for (unsigned idx = 0; idx < c1->size(); ++idx) {
+      const DSOAndOffset *do1 = &(*c1)[idx];
+      const DSOAndOffset *do2 = &(*c2)[idx];
+      if (do1->offset() != do2->offset()) {
+        return do1->offset() < do2->offset();
+      }
+      int rc = do1->dso_name().compare(do2->dso_name());
+      if (rc) {
+        return rc < 0;
+      }
+    }
+    return false;
+  }
+};
+
+struct RangeTarget {
+  RangeTarget(uint64 start, uint64 end, uint64 to)
+      : start(start), end(end), to(to) {}
+
+  bool operator<(const RangeTarget &r) const {
+    if (start != r.start) {
+      return start < r.start;
+    } else if (end != r.end) {
+      return end < r.end;
+    } else {
+      return to < r.to;
+    }
+  }
+  uint64 start;
+  uint64 end;
+  uint64 to;
+};
+
+struct BinaryProfile {
+  map<uint64, uint64> address_count_map;
+  map<RangeTarget, uint64> range_count_map;
+  map<const callchain *, uint64, callchain_lt> callchain_count_map;
+};
+
+wireless_android_play_playlog::AndroidPerfProfile
+RawPerfDataToAndroidPerfProfile(const string &perf_file) {
+  wireless_android_play_playlog::AndroidPerfProfile ret;
+  quipper::PerfParser parser;
+  if (!parser.ReadFile(perf_file) || !parser.ParseRawEvents()) {
+    return ret;
+  }
+
+  typedef map<string, BinaryProfile> ModuleProfileMap;
+  typedef map<string, ModuleProfileMap> ProgramProfileMap;
+
+  // Note: the callchain_count_map member in BinaryProfile contains
+  // pointers into callchains owned by "parser" above, meaning
+  // that once the parser is destroyed, callchain pointers in
+  // name_profile_map will become stale (i.e. keep these two
+  // objects in the same scope).
+  ProgramProfileMap name_profile_map;
+  uint64 total_samples = 0;
+  bool seen_branch_stack = false;
+  bool seen_callchain = false;
+  for (const auto &event : parser.parsed_events()) {
+    if (!event.raw_event ||
+        event.raw_event->header.type != PERF_RECORD_SAMPLE) {
+      continue;
+    }
+    string dso_name = event.dso_and_offset.dso_name();
+    string program_name;
+    const string kernel_name = "[kernel.kallsyms]";
+    if (dso_name.substr(0, kernel_name.length()) == kernel_name) {
+      dso_name = kernel_name;
+      program_name = "[kernel.kallsyms]";
+    } else if (event.command() == "") {
+      program_name = "unknown_program";
+    } else {
+      program_name = event.command();
+    }
+    total_samples++;
+    // We expect to see either all callchain events, all branch stack
+    // events, or all flat sample events, not a mix. For callchains,
+    // however, it can be the case that none of the IPs in a chain
+    // are mappable, in which case the parsed/mapped chain will appear
+    // empty (appearing as a flat sample).
+    if (!event.callchain.empty()) {
+      CHECK(!seen_branch_stack && "examining callchain");
+      seen_callchain = true;
+      const callchain *cc = &event.callchain;
+      name_profile_map[program_name][dso_name].callchain_count_map[cc]++;
+    } else if (!event.branch_stack.empty()) {
+      CHECK(!seen_callchain && "examining branch stack");
+      seen_branch_stack = true;
+      name_profile_map[program_name][dso_name].address_count_map[
+          event.dso_and_offset.offset()]++;
+    } else {
+      name_profile_map[program_name][dso_name].address_count_map[
+          event.dso_and_offset.offset()]++;
+    }
+    for (size_t i = 1; i < event.branch_stack.size(); i++) {
+      if (dso_name == event.branch_stack[i - 1].to.dso_name()) {
+        uint64 start = event.branch_stack[i].to.offset();
+        uint64 end = event.branch_stack[i - 1].from.offset();
+        uint64 to = event.branch_stack[i - 1].to.offset();
+        // The interval between two taken branches should not be too large.
+        if (end < start || end - start > (1 << 20)) {
+          LOG(WARNING) << "Bogus LBR data: " << start << "->" << end;
+          continue;
+        }
+        name_profile_map[program_name][dso_name].range_count_map[
+            RangeTarget(start, end, to)]++;
+      }
+    }
+  }
+
+  map<string, int> name_id_map;
+  for (const auto &program_profile : name_profile_map) {
+    for (const auto &module_profile : program_profile.second) {
+      name_id_map[module_profile.first] = 0;
+    }
+  }
+  int current_index = 0;
+  for (auto iter = name_id_map.begin(); iter != name_id_map.end(); ++iter) {
+    iter->second = current_index++;
+  }
+
+  map<string, string> name_buildid_map;
+  parser.GetFilenamesToBuildIDs(&name_buildid_map);
+  ret.set_total_samples(total_samples);
+  for (const auto &name_id : name_id_map) {
+    auto load_module = ret.add_load_modules();
+    load_module->set_name(name_id.first);
+    auto nbmi = name_buildid_map.find(name_id.first);
+    if (nbmi != name_buildid_map.end()) {
+      const std::string &build_id = nbmi->second;
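+      // A 40-hex-char build id whose last eight characters are zeros is
+      // treated as a zero-padded 16-byte id and truncated accordingly.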
+      if (build_id.size() == 40 && build_id.substr(32) == "00000000") {
+        load_module->set_build_id(build_id.substr(0, 32));
+      } else {
+        load_module->set_build_id(build_id);
+      }
+    }
+  }
+  for (const auto &program_profile : name_profile_map) {
+    auto program = ret.add_programs();
+    program->set_name(program_profile.first);
+    for (const auto &module_profile : program_profile.second) {
+      int32 module_id = name_id_map[module_profile.first];
+      auto module = program->add_modules();
+      module->set_load_module_id(module_id);
+      for (const auto &addr_count : module_profile.second.address_count_map) {
+        auto address_samples = module->add_address_samples();
+        address_samples->add_address(addr_count.first);
+        address_samples->set_count(addr_count.second);
+      }
+      for (const auto &range_count : module_profile.second.range_count_map) {
+        auto range_samples = module->add_range_samples();
+        range_samples->set_start(range_count.first.start);
+        range_samples->set_end(range_count.first.end);
+        range_samples->set_to(range_count.first.to);
+        range_samples->set_count(range_count.second);
+      }
+      for (const auto &callchain_count :
+               module_profile.second.callchain_count_map) {
+        auto address_samples = module->add_address_samples();
+        address_samples->set_count(callchain_count.second);
+        for (const auto &d_o : *callchain_count.first) {
+          int32 module_id = name_id_map[d_o.dso_name()];
+          address_samples->add_load_module_id(module_id);
+          address_samples->add_address(d_o.offset());
+        }
+      }
+    }
+  }
+  return ret;
+}
+
+}  // namespace wireless_android_logging_awp
diff --git a/perfprofd/perf_data_converter.h b/perfprofd/perf_data_converter.h
new file mode 100644
index 0000000..fdbde00
--- /dev/null
+++ b/perfprofd/perf_data_converter.h
@@ -0,0 +1,15 @@
+#ifndef WIRELESS_ANDROID_LOGGING_AWP_PERF_DATA_CONVERTER_H_
+#define WIRELESS_ANDROID_LOGGING_AWP_PERF_DATA_CONVERTER_H_
+
+#include "perf_profile.pb.h"
+
+#include <string>
+
+namespace wireless_android_logging_awp {
+
+wireless_android_play_playlog::AndroidPerfProfile
+RawPerfDataToAndroidPerfProfile(const std::string &perf_file);
+
+}  // namespace wireless_android_logging_awp
+
+#endif  // WIRELESS_ANDROID_LOGGING_AWP_PERF_DATA_CONVERTER_H_
diff --git a/perfprofd/perf_profile.proto b/perfprofd/perf_profile.proto
new file mode 100644
index 0000000..2f60e12
--- /dev/null
+++ b/perfprofd/perf_profile.proto
@@ -0,0 +1,117 @@
+
+syntax = "proto2";
+
+option java_package = "com.google.common.logging";
+
+option optimize_for = LITE_RUNTIME;
+
+package wireless_android_play_playlog;
+
+// An entry of the map from a stack of addresses to count.
+// Address here is the offset of the instruction address to the load address
+// of the load_module.
+message AddressSample {
+  // List of addresses that represents a call stack.
+  // address[0] is the leaf of the call stack.
+  repeated uint64 address = 1;
+
+  // List of load_module_ids that represents a call stack.
+  // load_module_id[0] is the leaf of the call stack.
+  // This field can be left empty if all frames share the same load_module_id
+  // as LoadModuleSamples.load_module_id.
+  repeated int32 load_module_id = 2;
+
+  // Total count that the address/address_range is sampled.
+  optional int64 count = 3;
+};
+
+// An entry of the map from address_range to count.
+// [start, end] represents the range of addresses, end->to represents the
+// taken branch that ends the range.
+message RangeSample {
+  // Start instruction address of a range.
+  optional uint64 start = 1;
+
+  // If "end" and "to" are not provided, "start" represents a single instruction.
+  optional uint64 end = 2;
+  optional uint64 to = 3;
+
+  // Total count that the address/address_range is sampled.
+  optional int64 count = 4;
+};
+
+// A load module.
+message LoadModule {
+  // Name of the load_module.
+  optional string name = 1;
+
+  // LoadModule's linker build_id.
+  optional string build_id = 2;
+}
+
+// All samples for a load_module.
+message LoadModuleSamples {
+  optional int32 load_module_id = 1;
+
+  // Map from a stack of addresses to count.
+  repeated AddressSample address_samples = 2;
+
+  // Map from a range triplet (start, end, to) to count.
+  repeated RangeSample range_samples = 3;
+}
+
+// All samples for a program.
+message ProgramSamples {
+  // Name of the program.
+  optional string name = 1;
+
+  // Load module profiles.
+  repeated LoadModuleSamples modules = 2;
+}
+
+// A compressed representation of a perf profile, which contains samples from
+// multiple binaries.
+message AndroidPerfProfile {
+
+  // Type of the hardware event.
+  enum EventType {
+    CYCLE = 0;
+    BRANCH = 1;
+  }
+  // Hardware event used in profiling.
+  optional EventType event = 1;
+
+  // Total number of samples in this profile.
+  // This is the sum of counts of address_samples and range_samples in all
+  // load_module_samples.
+  optional int64 total_samples = 2;
+
+  // Samples for all profiled programs.
+  repeated ProgramSamples programs = 3;
+
+  // List of all load modules.
+  repeated LoadModule load_modules = 4;
+
+  // Is the device screen on at the point when the profile is collected?
+  optional bool display_on = 5;
+
+  // System load at the point when the profile is collected; corresponds
+  // to the first value from /proc/loadavg multiplied by 100 and then
+  // converted to int32.
+  optional int32 sys_load_average = 6;
+
+  // At the point when the profile was collected, was a camera active?
+  optional bool camera_active = 7;
+
+  // At the point when the profile was collected, was the device still booting?
+  optional bool booting = 8;
+
+  // At the point when the profile was collected, was the device plugged into
+  // a charger?
+  optional bool on_charger = 9;
+
+  // CPU utilization measured prior to profile collection (expressed as
+  // 100 minus the idle percentage).
+  optional int32 cpu_utilization = 10;
+
+}
diff --git a/perfprofd/perfprofd.conf b/perfprofd/perfprofd.conf
new file mode 100644
index 0000000..696c3de
--- /dev/null
+++ b/perfprofd/perfprofd.conf
@@ -0,0 +1,24 @@
+#
+# Configuration file for perf profile collection daemon (perfprofd)
+#
+#------------------------------------------------------------------------
+#
+# Destination directory for profiles
+#
+destination_directory=/data/misc/perfprofd
+#
+# Config directory for perfprofd
+#
+config_directory=/data/data/com.google.android.gms/files
+#
+# Sampling period (for perf -c option)
+#
+sampling_period=500000
+#
+# Average interval to wait between profile collection attempts (seconds)
+#
+collection_interval=86400
+#
+# Number of seconds of profile data to collect
+#
+sample_duration=3
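+#
+# Other options recognized by perfprofd (values shown are the built-in
+# defaults; see configreader.cc), for example:
+#
+# perf_path=/system/xbin/simpleperf
+# stack_profile=0
+# hardwire_cpus=1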
diff --git a/perfprofd/perfprofd.rc b/perfprofd/perfprofd.rc
new file mode 100644
index 0000000..40aab6b
--- /dev/null
+++ b/perfprofd/perfprofd.rc
@@ -0,0 +1,6 @@
+service perfprofd /system/xbin/perfprofd
+    class late_start
+    user root
+    group root wakelock
+    oneshot
+    writepid /dev/cpuset/system-background/tasks
diff --git a/perfprofd/perfprofdcore.cc b/perfprofd/perfprofdcore.cc
new file mode 100644
index 0000000..746ed41
--- /dev/null
+++ b/perfprofd/perfprofdcore.cc
@@ -0,0 +1,923 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <assert.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <string>
+#include <sstream>
+#include <map>
+#include <set>
+#include <cctype>
+
+#include <android-base/file.h>
+#include <android-base/stringprintf.h>
+#include <cutils/properties.h>
+
+#include "perfprofdcore.h"
+#include "perfprofdutils.h"
+#include "perf_data_converter.h"
+#include "cpuconfig.h"
+#include "configreader.h"
+
+//
+// Perf profiling daemon -- collects system-wide profiles using
+//
+//       simpleperf record -a
+//
+// and encodes them so that they can be uploaded by a separate service.
+//
+
+//......................................................................
+
+//
+// Output file from 'perf record'.
+//
+#define PERF_OUTPUT "perf.data"
+
+//
+// This enum holds the results of the "should we profile" configuration check.
+//
+typedef enum {
+
+  // All systems go for profile collection.
+  DO_COLLECT_PROFILE,
+
+  // The selected configuration directory doesn't exist.
+  DONT_PROFILE_MISSING_CONFIG_DIR,
+
+  // Destination directory does not contain the semaphore file that
+  // the perf profile uploading service creates when it determines
+  // that the user has opted "in" for usage data collection. No
+  // semaphore -> no user approval -> no profiling.
+  DONT_PROFILE_MISSING_SEMAPHORE,
+
+  // No perf executable present
+  DONT_PROFILE_MISSING_PERF_EXECUTABLE,
+
+  // We're running in the emulator; perf won't be able to do much
+  DONT_PROFILE_RUNNING_IN_EMULATOR
+
+} CKPROFILE_RESULT;
+
+//
+// Are we running in the emulator? If so, stub out profile collection
+// Starts as uninitialized (-1), then set to 1 or 0 at init time.
+//
+static int running_in_emulator = -1;
+
+//
+// Is this a debug build ('userdebug' or 'eng')?
+// Starts as uninitialized (-1), then set to 1 or 0 at init time.
+//
+static int is_debug_build = -1;
+
+//
+// Random number generator seed (set at startup time).
+//
+static unsigned short random_seed[3];
+
+//
+// SIGHUP handler. Sending SIGHUP to the daemon can be used to break it
+// out of a sleep() call so as to trigger a new collection (debugging)
+//
+static void sig_hup(int /* signum */)
+{
+  W_ALOGW("SIGHUP received");
+}
+
+//
+// Parse command line args. Currently you can supply "-c P" to set
+// the path of the config file to P.
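+// Example: perfprofd -c <path-to-alternate-config-file>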
+//
+static void parse_args(int argc, char** argv)
+{
+  int ac;
+
+  for (ac = 1; ac < argc; ++ac) {
+    if (!strcmp(argv[ac], "-c")) {
+      if (ac >= argc-1) {
+        W_ALOGE("malformed command line: -c option requires an argument");
+        continue;
+      }
+      ConfigReader::setConfigFilePath(argv[ac+1]);
+      ++ac;
+    } else {
+      W_ALOGE("malformed command line: unknown option or arg %s", argv[ac]);
+      continue;
+    }
+  }
+}
+
+//
+// Convert a CKPROFILE_RESULT to a string
+//
+const char *ckprofile_result_to_string(CKPROFILE_RESULT result)
+{
+  switch (result) {
+    case DO_COLLECT_PROFILE:
+      return "DO_COLLECT_PROFILE";
+    case DONT_PROFILE_MISSING_CONFIG_DIR:
+      return "missing config directory";
+    case DONT_PROFILE_MISSING_SEMAPHORE:
+      return "missing semaphore file";
+    case DONT_PROFILE_MISSING_PERF_EXECUTABLE:
+      return "missing 'perf' executable";
+    case DONT_PROFILE_RUNNING_IN_EMULATOR:
+      return "running in emulator";
+    default: return "unknown";
+  }
+  return "notreached";
+}
+
+//
+// Convert a PROFILE_RESULT to a string
+//
+const char *profile_result_to_string(PROFILE_RESULT result)
+{
+  switch(result) {
+    case OK_PROFILE_COLLECTION:
+      return "profile collection succeeded";
+    case ERR_FORK_FAILED:
+      return "fork() system call failed";
+    case ERR_PERF_RECORD_FAILED:
+      return "perf record returned bad exit status";
+    case ERR_PERF_ENCODE_FAILED:
+      return "failure encoding perf.data to protobuf";
+    case ERR_OPEN_ENCODED_FILE_FAILED:
+      return "failed to open encoded perf file";
+    case ERR_WRITE_ENCODED_FILE_FAILED:
+      return "write to encoded perf file failed";
+    default: return "unknown";
+  }
+  return "notreached";
+}
+
+//
+// Check to see whether we should perform a profile collection
+//
+static CKPROFILE_RESULT check_profiling_enabled(ConfigReader &config)
+{
+  //
+  // Profile collection in the emulator doesn't make sense
+  //
+  assert(running_in_emulator != -1);
+  if (running_in_emulator) {
+    return DONT_PROFILE_RUNNING_IN_EMULATOR;
+  }
+
+  //
+  // Check for existence of semaphore file in config directory
+  //
+  if (access(config.getStringValue("config_directory").c_str(), F_OK) == -1) {
+    W_ALOGW("unable to open config directory %s: (%s)",
+            config.getStringValue("config_directory").c_str(), strerror(errno));
+    return DONT_PROFILE_MISSING_CONFIG_DIR;
+  }
+
+
+  // Check for existence of semaphore file
+  std::string semaphore_filepath = config.getStringValue("config_directory")
+                                   + "/" + SEMAPHORE_FILENAME;
+  if (access(semaphore_filepath.c_str(), F_OK) == -1) {
+    return DONT_PROFILE_MISSING_SEMAPHORE;
+  }
+
+  // Check for existence of simpleperf/perf executable
+  std::string pp = config.getStringValue("perf_path");
+  if (access(pp.c_str(), R_OK|X_OK) == -1) {
+    W_ALOGW("unable to access/execute %s", pp.c_str());
+    return DONT_PROFILE_MISSING_PERF_EXECUTABLE;
+  }
+
+  //
+  // We are good to go
+  //
+  return DO_COLLECT_PROFILE;
+}
+
+bool get_booting()
+{
+  char propBuf[PROPERTY_VALUE_MAX];
+  propBuf[0] = '\0';
+  property_get("sys.boot_completed", propBuf, "");
+  return (propBuf[0] != '1');
+}
+
+//
+// Constructor takes a timeout (in seconds) and a child pid; if an
+// alarm set for the specified number of seconds triggers, then a
+// SIGKILL is sent to the child. The destructor resets the alarm. Example:
+//
+//       pid_t child_pid = ...;
+//       { AlarmHelper h(10, child_pid);
+//         ... = read_from_child(child_pid, ...);
+//       }
+//
+// NB: this helper is not re-entrant-- avoid nested use or
+// use by multiple threads
+//
+class AlarmHelper {
+ public:
+  AlarmHelper(unsigned num_seconds, pid_t child)
+  {
+    struct sigaction sigact;
+    assert(child);
+    assert(child_ == 0);
+    memset(&sigact, 0, sizeof(sigact));
+    sigact.sa_sigaction = handler;
+    sigaction(SIGALRM, &sigact, &oldsigact_);
+    child_ = child;
+    alarm(num_seconds);
+  }
+  ~AlarmHelper()
+  {
+    alarm(0);
+    child_ = 0;
+    sigaction(SIGALRM, &oldsigact_, NULL);
+  }
+  static void handler(int, siginfo_t *, void *);
+
+ private:
+  struct sigaction oldsigact_;
+  static pid_t child_;
+};
+
+pid_t AlarmHelper::child_;
+
+void AlarmHelper::handler(int, siginfo_t *, void *)
+{
+  W_ALOGW("SIGALRM timeout");
+  kill(child_, SIGKILL);
+}
+
+//
+// This implementation invokes "dumpsys media.camera" and inspects the
+// output to determine if any camera clients are active. NB: this is
+// currently disabled (via config option) until the SELinux issues can
+// be sorted out. Another possible implementation (not yet attempted)
+// would be to use the binder to call into the native camera service
+// via "ICameraService".
+//
+bool get_camera_active()
+{
+  int pipefds[2];
+  if (pipe2(pipefds, O_CLOEXEC) != 0) {
+    W_ALOGE("pipe2() failed (%s)", strerror(errno));
+    return false;
+  }
+  pid_t pid = fork();
+  if (pid == -1) {
+    W_ALOGE("fork() failed (%s)", strerror(errno));
+    close(pipefds[0]);
+    close(pipefds[1]);
+    return false;
+  } else if (pid == 0) {
+    // child
+    close(pipefds[0]);
+    dup2(pipefds[1], fileno(stderr));
+    dup2(pipefds[1], fileno(stdout));
+    const char *argv[10];
+    unsigned slot = 0;
+    argv[slot++] = "/system/bin/dumpsys";
+    argv[slot++] = "media.camera";
+    argv[slot++] = nullptr;
+    execvp(argv[0], (char * const *)argv);
+    W_ALOGE("execvp() failed (%s)", strerror(errno));
+    _exit(1);  // never fall through into the parent/daemon code
+  }
+  // parent
+  AlarmHelper helper(10, pid);
+  close(pipefds[1]);
+
+  // read output
+  bool have_cam = false;
+  bool have_clients = true;
+  std::string dump_output;
+  bool result = android::base::ReadFdToString(pipefds[0], &dump_output);
+  close(pipefds[0]);
+  if (result) {
+    std::stringstream ss(dump_output);
+    std::string line;
+    while (std::getline(ss,line,'\n')) {
+      if (line.find("Camera module API version:") !=
+          std::string::npos) {
+        have_cam = true;
+      }
+      if (line.find("No camera module available") !=
+          std::string::npos ||
+          line.find("No active camera clients yet") !=
+          std::string::npos) {
+        have_clients = false;
+      }
+    }
+  }
+
+  // reap child (no zombies please)
+  int st = 0;
+  TEMP_FAILURE_RETRY(waitpid(pid, &st, 0));
+  return have_cam && have_clients;
+}
+
+bool get_charging()
+{
+  std::string psdir("/sys/class/power_supply");
+  DIR* dir = opendir(psdir.c_str());
+  if (dir == NULL) {
+    W_ALOGE("Failed to open dir %s (%s)", psdir.c_str(), strerror(errno));
+    return false;
+  }
+  struct dirent* e;
+  bool result = false;
+  while ((e = readdir(dir)) != 0) {
+    if (e->d_name[0] != '.') {
+      std::string online_path = psdir + "/" + e->d_name + "/online";
+      std::string contents;
+      int value = 0;
+      if (android::base::ReadFileToString(online_path.c_str(), &contents) &&
+          sscanf(contents.c_str(), "%d", &value) == 1) {
+        if (value) {
+          result = true;
+          break;
+        }
+      }
+    }
+  }
+  closedir(dir);
+  return result;
+}
+
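+//
+// Parse the leading "cpu ..." line of /proc/stat, which looks like, e.g.,
+//
+//   cpu  2255 34 2290 22625563 6290 127 456 ...
+//
+// i.e. user, nice, system, idle, iowait, irq and softirq ticks. Returns
+// the idle tick count and the sum of the remaining six fields.
+//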
+bool postprocess_proc_stat_contents(const std::string &pscontents,
+                                    long unsigned *idleticks,
+                                    long unsigned *remainingticks)
+{
+  long unsigned usertime, nicetime, systime, idletime, iowaittime;
+  long unsigned irqtime, softirqtime;
+
+  int rc = sscanf(pscontents.c_str(), "cpu  %lu %lu %lu %lu %lu %lu %lu",
+                  &usertime, &nicetime, &systime, &idletime,
+                  &iowaittime, &irqtime, &softirqtime);
+  if (rc != 7) {
+    return false;
+  }
+  *idleticks = idletime;
+  *remainingticks = usertime + nicetime + systime + iowaittime + irqtime + softirqtime;
+  return true;
+}
+
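+//
+// Estimate CPU utilization over a one-second window by sampling
+// /proc/stat twice: utilization = 100 * busy_delta / total_delta.
+// For example, 150 busy ticks out of 400 total ticks reports 37.
+//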
+unsigned collect_cpu_utilization()
+{
+  std::string contents;
+  long unsigned idle[2];
+  long unsigned busy[2];
+  for (unsigned iter = 0; iter < 2; ++iter) {
+    if (!android::base::ReadFileToString("/proc/stat", &contents)) {
+      return 0;
+    }
+    if (!postprocess_proc_stat_contents(contents, &idle[iter], &busy[iter])) {
+      return 0;
+    }
+    if (iter == 0) {
+      sleep(1);
+    }
+  }
+  long unsigned total_delta = (idle[1] + busy[1]) - (idle[0] + busy[0]);
+  long unsigned busy_delta = busy[1] - busy[0];
+  return busy_delta * 100 / total_delta;
+}
+
+static void annotate_encoded_perf_profile(wireless_android_play_playlog::AndroidPerfProfile *profile,
+                                          const ConfigReader &config,
+                                          unsigned cpu_utilization)
+{
+  //
+  // Incorporate cpu utilization (collected prior to perf run)
+  //
+  if (config.getUnsignedValue("collect_cpu_utilization")) {
+    profile->set_cpu_utilization(cpu_utilization);
+  }
+
+  //
+  // Load average as reported by the kernel
+  //
+  std::string load;
+  double fload = 0.0;
+  if (android::base::ReadFileToString("/proc/loadavg", &load) &&
+      sscanf(load.c_str(), "%lf", &fload) == 1) {
+    int iload = static_cast<int>(fload * 100.0);
+    profile->set_sys_load_average(iload);
+  } else {
+    W_ALOGE("Failed to read or scan /proc/loadavg (%s)", strerror(errno));
+  }
+
+  //
+  // Device still booting? Camera in use? Plugged into charger?
+  //
+  bool is_booting = get_booting();
+  if (config.getUnsignedValue("collect_booting")) {
+    profile->set_booting(is_booting);
+  }
+  if (config.getUnsignedValue("collect_camera_active")) {
+    profile->set_camera_active(is_booting ? false : get_camera_active());
+  }
+  if (config.getUnsignedValue("collect_charging_state")) {
+    profile->set_on_charger(get_charging());
+  }
+
+  //
+  // Examine the contents of wake_unlock to determine whether the
+  // device display is on or off. NB: is this really the only way to
+  // determine this info?
+  //
+  std::string disp;
+  if (android::base::ReadFileToString("/sys/power/wake_unlock", &disp)) {
+    bool ison = (strstr(disp.c_str(), "PowerManagerService.Display") == 0);
+    profile->set_display_on(ison);
+  } else {
+    W_ALOGE("Failed to read /sys/power/wake_unlock (%s)", strerror(errno));
+  }
+}
+
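+// Return a mutable pointer to the string's storage (std::string::data()
+// is const-qualified in C++11), or NULL for an empty string.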
+inline char* string_as_array(std::string* str) {
+  return str->empty() ? NULL : &*str->begin();
+}
+
+PROFILE_RESULT encode_to_proto(const std::string &data_file_path,
+                               const char *encoded_file_path,
+                               const ConfigReader &config,
+                               unsigned cpu_utilization)
+{
+  //
+  // Open and read perf.data file
+  //
+  const wireless_android_play_playlog::AndroidPerfProfile &encodedProfile =
+      wireless_android_logging_awp::RawPerfDataToAndroidPerfProfile(data_file_path);
+
+  //
+  // Issue error if no samples
+  //
+  if (encodedProfile.programs().size() == 0) {
+    return ERR_PERF_ENCODE_FAILED;
+  }
+
+  // All of the info in 'encodedProfile' is derived from the perf.data file;
+  // here we tack on display status, cpu utilization, system load, etc.
+  wireless_android_play_playlog::AndroidPerfProfile &prof =
+      const_cast<wireless_android_play_playlog::AndroidPerfProfile&>
+      (encodedProfile);
+  annotate_encoded_perf_profile(&prof, config, cpu_utilization);
+
+  //
+  // Serialize protobuf to array
+  //
+  int size = encodedProfile.ByteSize();
+  std::string data;
+  data.resize(size);
+  ::google::protobuf::uint8* dtarget =
+        reinterpret_cast<::google::protobuf::uint8*>(string_as_array(&data));
+  encodedProfile.SerializeWithCachedSizesToArray(dtarget);
+
+  //
+  // Open file and write encoded data to it
+  //
+  FILE *fp = fopen(encoded_file_path, "w");
+  if (!fp) {
+    return ERR_OPEN_ENCODED_FILE_FAILED;
+  }
+  size_t fsiz = size;
+  if (fwrite(dtarget, fsiz, 1, fp) != 1) {
+    fclose(fp);
+    return ERR_WRITE_ENCODED_FILE_FAILED;
+  }
+  fclose(fp);
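+  // Loosen permissions on the encoded file, presumably so that the uploader
+  // described in perfprofdcore.h can read it.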
+  chmod(encoded_file_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+
+  return OK_PROFILE_COLLECTION;
+}
+
+//
+// Invoke "perf record". Return value is OK_PROFILE_COLLECTION for
+// success, or some other error code if something went wrong.
+//
+static PROFILE_RESULT invoke_perf(const std::string &perf_path,
+                                  unsigned sampling_period,
+                                  const char *stack_profile_opt,
+                                  unsigned duration,
+                                  const std::string &data_file_path,
+                                  const std::string &perf_stderr_path)
+{
+  pid_t pid = fork();
+
+  if (pid == -1) {
+    return ERR_FORK_FAILED;
+  }
+
+  if (pid == 0) {
+    // child
+
+    // Open file to receive stderr/stdout from perf
+    FILE *efp = fopen(perf_stderr_path.c_str(), "w");
+    if (efp) {
+      dup2(fileno(efp), STDERR_FILENO);
+      dup2(fileno(efp), STDOUT_FILENO);
+    } else {
+      W_ALOGW("unable to open %s for writing", perf_stderr_path.c_str());
+    }
+
+    // marshall arguments
+    constexpr unsigned max_args = 13;
+    const char *argv[max_args];
+    unsigned slot = 0;
+    argv[slot++] = perf_path.c_str();
+    argv[slot++] = "record";
+
+    // -o perf.data
+    argv[slot++] = "-o";
+    argv[slot++] = data_file_path.c_str();
+
+    // -c N
+    argv[slot++] = "-c";
+    std::string p_str = android::base::StringPrintf("%u", sampling_period);
+    argv[slot++] = p_str.c_str();
+
+    // -g if desired
+    if (stack_profile_opt)
+      argv[slot++] = stack_profile_opt;
+
+    // system wide profiling
+    argv[slot++] = "-a";
+
+    // no need for kernel symbols
+    argv[slot++] = "--no-dump-kernel-symbols";
+
+    // sleep <duration>
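+    // perf record treats trailing arguments as a workload command and records
+    // until that command exits, so running /system/bin/sleep here bounds the
+    // collection window to <duration> seconds.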
+    argv[slot++] = "/system/bin/sleep";
+    std::string d_str = android::base::StringPrintf("%u", duration);
+    argv[slot++] = d_str.c_str();
+
+    // terminator
+    argv[slot++] = nullptr;
+    assert(slot < max_args);
+
+    // record the final command line in the error output file for
+    // posterity/debugging purposes
+    fprintf(stderr, "perf invocation (pid=%d):\n", getpid());
+    for (unsigned i = 0; argv[i] != nullptr; ++i) {
+      fprintf(stderr, "%s%s", i ? " " : "", argv[i]);
+    }
+    fprintf(stderr, "\n");
+
+    // exec
+    execvp(argv[0], (char * const *)argv);
+    fprintf(stderr, "exec failed: %s\n", strerror(errno));
+    exit(1);
+
+  } else {
+    // parent
+    int st = 0;
+    pid_t reaped = TEMP_FAILURE_RETRY(waitpid(pid, &st, 0));
+
+    if (reaped == -1) {
+      W_ALOGW("waitpid failed: %s", strerror(errno));
+    } else if (WIFSIGNALED(st)) {
+      W_ALOGW("perf killed by signal %d", WTERMSIG(st));
+    } else if (WEXITSTATUS(st) != 0) {
+      W_ALOGW("perf bad exit status %d", WEXITSTATUS(st));
+    } else {
+      return OK_PROFILE_COLLECTION;
+    }
+  }
+
+  return ERR_PERF_RECORD_FAILED;
+}
+
+//
+// Remove all files in the destination directory during initialization
+//
+static void cleanup_destination_dir(const ConfigReader &config)
+{
+  std::string dest_dir = config.getStringValue("destination_directory");
+  DIR* dir = opendir(dest_dir.c_str());
+  if (dir != NULL) {
+    struct dirent* e;
+    while ((e = readdir(dir)) != 0) {
+      if (e->d_name[0] != '.') {
+        std::string file_path = dest_dir + "/" + e->d_name;
+        remove(file_path.c_str());
+      }
+    }
+    closedir(dir);
+  } else {
+    W_ALOGW("unable to open destination dir %s for cleanup",
+            dest_dir.c_str());
+  }
+}
+
+//
+// Post-processing step, run after a profile has been collected and
+// converted to protobuf:
+// * GMS core stores processed file sequence numbers in
+//   /data/data/com.google.android.gms/files/perfprofd_processed.txt
+// * Update /data/misc/perfprofd/perfprofd_produced.txt to remove the sequence
+//   numbers that have been processed and append the current seq number
+// Returns true if the current_seq should increment.
+//
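+// Example: if the processed file lists 3 and 4, their encoded profiles are
+// deleted and those sequence numbers dropped from the produced list; the
+// current sequence number is then appended, provided the count of
+// still-unprocessed profiles is below max_unprocessed_profiles.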
+static bool post_process(const ConfigReader &config, int current_seq)
+{
+  std::string dest_dir = config.getStringValue("destination_directory");
+  std::string processed_file_path =
+      config.getStringValue("config_directory") + "/" + PROCESSED_FILENAME;
+  std::string produced_file_path = dest_dir + "/" + PRODUCED_FILENAME;
+
+  std::set<int> processed;
+  FILE *fp = fopen(processed_file_path.c_str(), "r");
+  if (fp != NULL) {
+    int seq;
+    while (fscanf(fp, "%d\n", &seq) > 0) {
+      if (remove(android::base::StringPrintf(
+          "%s/perf.data.encoded.%d", dest_dir.c_str(), seq).c_str()) == 0) {
+        processed.insert(seq);
+      }
+    }
+    fclose(fp);
+  }
+
+  std::set<int> produced;
+  fp = fopen(produced_file_path.c_str(), "r");
+  if (fp != NULL) {
+    int seq;
+    while (fscanf(fp, "%d\n", &seq) > 0) {
+      if (processed.find(seq) == processed.end()) {
+        produced.insert(seq);
+      }
+    }
+    fclose(fp);
+  }
+
+  unsigned maxLive = config.getUnsignedValue("max_unprocessed_profiles");
+  if (produced.size() >= maxLive) {
+    return false;
+  }
+
+  produced.insert(current_seq);
+  fp = fopen(produced_file_path.c_str(), "w");
+  if (fp == NULL) {
+    W_ALOGW("Cannot write %s", produced_file_path.c_str());
+    return false;
+  }
+  for (std::set<int>::const_iterator iter = produced.begin();
+       iter != produced.end(); ++iter) {
+    fprintf(fp, "%d\n", *iter);
+  }
+  fclose(fp);
+  chmod(produced_file_path.c_str(),
+        S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+  return true;
+}
+
+//
+// Collect a perf profile. Steps for this operation are:
+// - kick off 'perf record'
+// - read perf.data, convert to protocol buf
+//
+static PROFILE_RESULT collect_profile(const ConfigReader &config, int seq)
+{
+  //
+  // Collect cpu utilization if enabled
+  //
+  unsigned cpu_utilization = 0;
+  if (config.getUnsignedValue("collect_cpu_utilization")) {
+    cpu_utilization = collect_cpu_utilization();
+  }
+
+  //
+  // Form perf.data file name, perf error output file name
+  //
+  std::string destdir = config.getStringValue("destination_directory");
+  std::string data_file_path(destdir);
+  data_file_path += "/";
+  data_file_path += PERF_OUTPUT;
+  std::string perf_stderr_path(destdir);
+  perf_stderr_path += "/perferr.txt";
+
+  //
+  // Remove any existing perf.data file -- if we don't do this, perf
+  // will rename the old file and we'll have extra cruft lying around.
+  //
+  struct stat statb;
+  if (stat(data_file_path.c_str(), &statb) == 0) { // if file exists...
+    if (unlink(data_file_path.c_str())) {          // then try to remove
+      W_ALOGW("unable to unlink previous perf.data file");
+    }
+  }
+
+  //
+  // The "mpdecision" daemon can cause problems for profile
+  // collection: if it decides to online a CPU partway through the
+  // 'perf record' run, the activity on that CPU will be invisible to
+  // perf, and if it offlines a CPU during the recording this can
+  // sometimes leave the PMU in an unusable state (dmesg errors of the
+  // form "perfevents: unable to request IRQXXX for ...").  To avoid
+  // these issues, if "mpdecision" is running the helper below will
+  // stop the service and then online all available CPUs. The object
+  // destructor (invoked when this routine terminates) will then
+  // restart the service again when needed.
+  //
+  unsigned duration = config.getUnsignedValue("sample_duration");
+  unsigned hardwire = config.getUnsignedValue("hardwire_cpus");
+  unsigned max_duration = config.getUnsignedValue("hardwire_cpus_max_duration");
+  bool take_action = (hardwire && duration <= max_duration);
+  HardwireCpuHelper helper(take_action);
+
+  //
+  // Invoke perf
+  //
+  const char *stack_profile_opt =
+      (config.getUnsignedValue("stack_profile") != 0 ? "-g" : nullptr);
+  std::string perf_path = config.getStringValue("perf_path");
+  unsigned period = config.getUnsignedValue("sampling_period");
+
+  PROFILE_RESULT ret = invoke_perf(perf_path.c_str(),
+                                  period,
+                                  stack_profile_opt,
+                                  duration,
+                                  data_file_path,
+                                  perf_stderr_path);
+  if (ret != OK_PROFILE_COLLECTION) {
+    return ret;
+  }
+
+  //
+  // Read the resulting perf.data file, encode into protocol buffer, then write
+  // the result to the file perf.data.encoded
+  //
+  std::string path = android::base::StringPrintf(
+      "%s.encoded.%d", data_file_path.c_str(), seq);
+  return encode_to_proto(data_file_path, path.c_str(), config, cpu_utilization);
+}
+
+//
+// Assuming that we want to collect a profile every N seconds,
+// randomly partition N into two sub-intervals.
+//
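+// e.g. with collection_interval == 3600 and a random draw of 0.25, the daemon
+// sleeps for 900 seconds before the collection and 2700 seconds after it.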
+static void determine_before_after(unsigned &sleep_before_collect,
+                                   unsigned &sleep_after_collect,
+                                   unsigned collection_interval)
+{
+  double frac = erand48(random_seed);
+  sleep_before_collect = (unsigned) (((double)collection_interval) * frac);
+  assert(sleep_before_collect <= collection_interval);
+  sleep_after_collect = collection_interval - sleep_before_collect;
+}
+
+//
+// Set random number generator seed
+//
+static void set_seed(ConfigReader &config)
+{
+  unsigned seed = 0;
+  unsigned use_fixed_seed = config.getUnsignedValue("use_fixed_seed");
+  if (use_fixed_seed) {
+    //
+    // Use fixed user-specified seed
+    //
+    seed = use_fixed_seed;
+  } else {
+    //
+    // Randomized seed
+    //
+    seed = arc4random();
+  }
+  W_ALOGI("random seed set to %u", seed);
+  // Distribute the 32-bit seed into the three 16-bit array
+  // elements. The specific values being written do not especially
+  // matter as long as we are setting them to something based on the seed.
+  random_seed[0] = seed & 0xffff;
+  random_seed[1] = (seed >> 16);
+  random_seed[2] = (random_seed[0] ^ random_seed[1]);
+}
+
+//
+// Initialization
+//
+static void init(ConfigReader &config)
+{
+  if (!config.readFile()) {
+    W_ALOGE("unable to open configuration file %s",
+            config.getConfigFilePath());
+  }
+
+  // Children of init inherit an artificially low OOM score -- this is not
+  // desirable for perfprofd (its OOM score should be on par with
+  // other user processes).
+  std::stringstream oomscore_path;
+  oomscore_path << "/proc/" << getpid() << "/oom_score_adj";
+  if (!android::base::WriteStringToFile("0", oomscore_path.str())) {
+    W_ALOGE("unable to write to %s", oomscore_path.str().c_str());
+  }
+
+  set_seed(config);
+  cleanup_destination_dir(config);
+
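+  // ro.kernel.qemu is "1" when running under the emulator; ro.debuggable is
+  // "1" on userdebug/eng builds.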
+  char propBuf[PROPERTY_VALUE_MAX];
+  propBuf[0] = '\0';
+  property_get("ro.kernel.qemu", propBuf, "");
+  running_in_emulator = (propBuf[0] == '1');
+  property_get("ro.debuggable", propBuf, "");
+  is_debug_build = (propBuf[0] == '1');
+
+  signal(SIGHUP, sig_hup);
+}
+
+//
+// Main routine:
+// 1. parse cmd line args
+// 2. read config file
+// 3. loop: {
+//       sleep for a while
+//       perform a profile collection
+//    }
+//
+int perfprofd_main(int argc, char** argv)
+{
+  ConfigReader config;
+
+  W_ALOGI("starting Android Wide Profiling daemon");
+
+  parse_args(argc, argv);
+  init(config);
+
+  // Early exit if we're not supposed to run on this build flavor
+  if (is_debug_build != 1 &&
+      config.getUnsignedValue("only_debug_build") == 1) {
+    W_ALOGI("early exit due to inappropriate build type");
+    return 0;
+  }
+
+  unsigned iterations = 0;
+  int seq = 0;
+  while (config.getUnsignedValue("main_loop_iterations") == 0 ||
+        iterations < config.getUnsignedValue("main_loop_iterations")) {
+
+    // Figure out where in the collection interval we're going to actually
+    // run perf
+    unsigned sleep_before_collect = 0;
+    unsigned sleep_after_collect = 0;
+    determine_before_after(sleep_before_collect, sleep_after_collect,
+                           config.getUnsignedValue("collection_interval"));
+    perfprofd_sleep(sleep_before_collect);
+
+    // Reread config file -- the uploader may have rewritten it as a result
+    // of a gservices change
+    config.readFile();
+
+    // Check for profiling enabled...
+    CKPROFILE_RESULT ckresult = check_profiling_enabled(config);
+    if (ckresult != DO_COLLECT_PROFILE) {
+      W_ALOGI("profile collection skipped (%s)",
+              ckprofile_result_to_string(ckresult));
+    } else {
+      // Kick off the profiling run...
+      W_ALOGI("initiating profile collection");
+      PROFILE_RESULT result = collect_profile(config, seq);
+      if (result != OK_PROFILE_COLLECTION) {
+        W_ALOGI("profile collection failed (%s)",
+                profile_result_to_string(result));
+      } else {
+        if (post_process(config, seq)) {
+          seq++;
+        }
+        W_ALOGI("profile collection complete");
+      }
+    }
+    perfprofd_sleep(sleep_after_collect);
+    iterations += 1;
+  }
+
+  W_ALOGI("finishing Android Wide Profiling daemon");
+  return 0;
+}
diff --git a/perfprofd/perfprofdcore.h b/perfprofd/perfprofdcore.h
new file mode 100644
index 0000000..2607c48
--- /dev/null
+++ b/perfprofd/perfprofdcore.h
@@ -0,0 +1,83 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef SYSTEM_EXTRAS_PERFPROFD_PERFPROFDCORE_H_
+#define SYSTEM_EXTRAS_PERFPROFD_PERFPROFDCORE_H_
+
+class ConfigReader;
+
+// Semaphore file that indicates that the user is opting in
+#define SEMAPHORE_FILENAME "perf_profile_collection_enabled.txt"
+
+// File containing a list of sequence numbers corresponding to profiles
+// that have been processed/uploaded. Written by the GmsCore uploader,
+// within the GmsCore files directory.
+#define PROCESSED_FILENAME "perfprofd_processed.txt"
+
+// File containing a list of sequence numbers corresponding to profiles
+// that have been created by the perfprofd but not yet uploaded. Written
+// by perfprofd within the destination directory; consumed by GmsCore.
+#define PRODUCED_FILENAME "perfprofd_produced.txt"
+
+// Main routine for perfprofd daemon
+extern int perfprofd_main(int argc, char **argv);
+
+//
+// This enumeration holds the possible results of an attempted perf
+// profiling run.
+//
+typedef enum {
+
+  // Success
+  OK_PROFILE_COLLECTION,
+
+  // Fork system call failed (low memory?)
+  ERR_FORK_FAILED,
+
+  // Perf ran but crashed or returned a bad exit status
+  ERR_PERF_RECORD_FAILED,
+
+  // The perf.data encoding process failed somehow
+  ERR_PERF_ENCODE_FAILED,
+
+  // We tried to open the output file perf.data.encoded but the open failed
+  ERR_OPEN_ENCODED_FILE_FAILED,
+
+  // Error while writing perf.data.encoded
+  ERR_WRITE_ENCODED_FILE_FAILED
+} PROFILE_RESULT;
+
+//
+// Given a full path to a perf.data file specified by "data_file_path",
+// read/summarize/encode the contents into a new file specified
+// by "encoded_file_path". Return status indicates whether the operation
+// was successful (either OK_PROFILE_COLLECTION or an error of some sort).
+//
+PROFILE_RESULT encode_to_proto(const std::string &data_file_path,
+                               const char *encoded_file_path,
+                               const ConfigReader &config,
+                               unsigned cpu_utilization);
+
+//
+// Exposed for unit testing
+//
+extern unsigned collect_cpu_utilization();
+extern bool get_booting();
+extern bool get_charging();
+extern bool get_camera_active();
+
+#endif
diff --git a/perfprofd/perfprofdmain.cc b/perfprofd/perfprofdmain.cc
new file mode 100644
index 0000000..35cdb95
--- /dev/null
+++ b/perfprofd/perfprofdmain.cc
@@ -0,0 +1,23 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+extern int perfprofd_main(int argc, char** argv);
+
+int main(int argc, char** argv)
+{
+  return perfprofd_main(argc, argv);
+}
diff --git a/perfprofd/perfprofdutils.cc b/perfprofd/perfprofdutils.cc
new file mode 100644
index 0000000..32d55c7
--- /dev/null
+++ b/perfprofd/perfprofdutils.cc
@@ -0,0 +1,54 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "perfprofd"
+
+#include <stdarg.h>
+#include <unistd.h>
+
+#include <utils/Log.h>
+
+#include "perfprofdutils.h"
+
+void perfprofd_log_error(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, fmt, ap);
+    va_end(ap);
+}
+
+void perfprofd_log_warning(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    LOG_PRI_VA(ANDROID_LOG_WARN, LOG_TAG, fmt, ap);
+    va_end(ap);
+}
+
+void perfprofd_log_info(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, fmt, ap);
+    va_end(ap);
+}
+
+void perfprofd_sleep(int seconds)
+{
+  sleep(seconds);
+}
diff --git a/perfprofd/perfprofdutils.h b/perfprofd/perfprofdutils.h
new file mode 100644
index 0000000..a17356b
--- /dev/null
+++ b/perfprofd/perfprofdutils.h
@@ -0,0 +1,36 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+//
+// These routines are separated out from the core perfprofd so
+// as to be used as part of the unit test (see the README.txt
+// alongside the unit test for more info).
+//
+extern void perfprofd_log_error(const char *fmt, ...);
+extern void perfprofd_log_warning(const char *fmt, ...);
+extern void perfprofd_log_info(const char *fmt, ...);
+extern void perfprofd_sleep(int seconds);
+
+#define W_ALOGE perfprofd_log_error
+#define W_ALOGW perfprofd_log_warning
+#define W_ALOGI perfprofd_log_info
+
+__END_DECLS
diff --git a/perfprofd/quipper/address_mapper.cc b/perfprofd/quipper/address_mapper.cc
new file mode 100644
index 0000000..70a2e5e
--- /dev/null
+++ b/perfprofd/quipper/address_mapper.cc
@@ -0,0 +1,217 @@
+// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "address_mapper.h"
+
+#include "base/logging.h"
+
+namespace quipper {
+
+AddressMapper::AddressMapper(const AddressMapper& source) {
+  mappings_ = source.mappings_;
+}
+
+bool AddressMapper::Map(const uint64_t real_addr,
+                        const uint64_t size,
+                        const bool remove_existing_mappings) {
+  return MapWithID(real_addr, size, kuint64max, 0, remove_existing_mappings);
+}
+
+bool AddressMapper::MapWithID(const uint64_t real_addr,
+                              const uint64_t size,
+                              const uint64_t id,
+                              const uint64_t offset_base,
+                              bool remove_existing_mappings) {
+  MappedRange range;
+  range.real_addr = real_addr;
+  range.size = size;
+  range.id = id;
+  range.offset_base = offset_base;
+
+  if (size == 0) {
+    LOG(ERROR) << "Must allocate a nonzero-length address range.";
+    return false;
+  }
+
+  // Check that this mapping does not overflow the address space.
+  if (real_addr + size - 1 != kuint64max &&
+      !(real_addr + size > real_addr)) {
+    DumpToLog();
+    LOG(ERROR) << "Address mapping at " << std::hex << real_addr
+               << " with size " << std::hex << size << " overflows.";
+    return false;
+  }
+
+  // Check for collision with an existing mapping. This must be an overlap
+  // that does not result in one range being completely covered by another.
+  MappingList::iterator iter;
+  MappingList mappings_to_delete;
+  bool old_range_found = false;
+  MappedRange old_range;
+  for (iter = mappings_.begin(); iter != mappings_.end(); ++iter) {
+    if (!iter->Intersects(range))
+      continue;
+    // Quit if existing ranges that collide aren't supposed to be removed.
+    if (!remove_existing_mappings)
+      return false;
+    if (!old_range_found && iter->Covers(range) && iter->size > range.size) {
+      old_range_found = true;
+      old_range = *iter;
+      continue;
+    }
+    mappings_to_delete.push_back(*iter);
+  }
+
+  while (!mappings_to_delete.empty()) {
+    const MappedRange& range = mappings_to_delete.front();
+    CHECK(Unmap(range));
+    mappings_to_delete.pop_front();
+  }
+
+  // Otherwise check for this range being covered by another range.  If that
+  // happens, split or reduce the existing range to make room.
+  if (old_range_found) {
+    CHECK(Unmap(old_range));
+
+    uint64_t gap_before = range.real_addr - old_range.real_addr;
+    uint64_t gap_after = (old_range.real_addr + old_range.size) -
+                         (range.real_addr + range.size);
+
+    if (gap_before) {
+      CHECK(MapWithID(old_range.real_addr,
+                      gap_before,
+                      old_range.id,
+                      old_range.offset_base,
+                      false));
+    }
+
+    CHECK(MapWithID(range.real_addr, range.size, id, offset_base, false));
+
+    if (gap_after) {
+      CHECK(MapWithID(range.real_addr + range.size,
+                      gap_after,
+                      old_range.id,
+                      old_range.offset_base + gap_before + range.size,
+                      false));
+    }
+
+    return true;
+  }
+
+  // Now search for a location for the new range.  It should be in the first
+  // free block in quipper space.
+
+  // If there is no existing mapping, add it to the beginning of quipper space.
+  if (mappings_.empty()) {
+    range.mapped_addr = 0;
+    range.unmapped_space_after = kuint64max - range.size;
+    mappings_.push_back(range);
+    return true;
+  }
+
+  // If there is space before the first mapped range in quipper space, use it.
+  if (mappings_.begin()->mapped_addr >= range.size) {
+    range.mapped_addr = 0;
+    range.unmapped_space_after = mappings_.begin()->mapped_addr - range.size;
+    mappings_.push_front(range);
+    return true;
+  }
+
+  // Otherwise, search through the existing mappings for a free block after one
+  // of them.
+  for (iter = mappings_.begin(); iter != mappings_.end(); ++iter) {
+    if (iter->unmapped_space_after < range.size)
+      continue;
+
+    range.mapped_addr = iter->mapped_addr + iter->size;
+    range.unmapped_space_after = iter->unmapped_space_after - range.size;
+    iter->unmapped_space_after = 0;
+
+    mappings_.insert(++iter, range);
+    return true;
+  }
+
+  // If it still hasn't succeeded in mapping, it means there is no free space in
+  // quipper space large enough for a mapping of this size.
+  DumpToLog();
+  LOG(ERROR) << "Could not find space to map addr=" << std::hex << real_addr
+             << " with size " << std::hex << size;
+  return false;
+}
+
+void AddressMapper::DumpToLog() const {
+  MappingList::const_iterator it;
+  for (it = mappings_.begin(); it != mappings_.end(); ++it) {
+    LOG(INFO) << " real_addr: " << std::hex << it->real_addr
+              << " mapped: " << std::hex << it->mapped_addr
+              << " id: " << std::hex << it->id
+              << " size: " << std::hex << it->size;
+  }
+}
+
+bool AddressMapper::GetMappedAddress(const uint64_t real_addr,
+                                     uint64_t* mapped_addr) const {
+  CHECK(mapped_addr);
+  MappingList::const_iterator iter;
+  for (iter = mappings_.begin(); iter != mappings_.end(); ++iter) {
+    if (!iter->ContainsAddress(real_addr))
+      continue;
+    *mapped_addr = iter->mapped_addr + real_addr - iter->real_addr;
+    return true;
+  }
+  return false;
+}
+
+bool AddressMapper::GetMappedIDAndOffset(const uint64_t real_addr,
+                                         uint64_t* id,
+                                         uint64_t* offset) const {
+  CHECK(id);
+  CHECK(offset);
+  MappingList::const_iterator iter;
+  for (iter = mappings_.begin(); iter != mappings_.end(); ++iter) {
+    if (!iter->ContainsAddress(real_addr))
+      continue;
+    *id = iter->id;
+    *offset = real_addr - iter->real_addr + iter->offset_base;
+    return true;
+  }
+  return false;
+}
+
+uint64_t AddressMapper::GetMaxMappedLength() const {
+  if (IsEmpty())
+    return 0;
+
+  uint64_t min = mappings_.begin()->mapped_addr;
+
+  MappingList::const_iterator iter = mappings_.end();
+  --iter;
+  uint64_t max = iter->mapped_addr + iter->size;
+
+  return max - min;
+}
+
+bool AddressMapper::Unmap(const MappedRange& range) {
+  MappingList::iterator iter;
+  // TODO(sque): this is highly inefficient since Unmap() is called from a
+  // function that has already iterated to the right place within |mappings_|.
+  // For a first revision, I am sacrificing efficiency for clarity, due to
+  // the trickiness of removing elements using iterators.
+  for (iter = mappings_.begin(); iter != mappings_.end(); ++iter) {
+    if (range.real_addr == iter->real_addr && range.size == iter->size) {
+      // Add the freed up space to the free space counter of the previous
+      // mapped region, if it exists.
+      if (iter != mappings_.begin()) {
+        --iter;
+        iter->unmapped_space_after += range.size + range.unmapped_space_after;
+        ++iter;
+      }
+      mappings_.erase(iter);
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace quipper
diff --git a/perfprofd/quipper/address_mapper.h b/perfprofd/quipper/address_mapper.h
new file mode 100644
index 0000000..ef2d6d2
--- /dev/null
+++ b/perfprofd/quipper/address_mapper.h
@@ -0,0 +1,128 @@
+// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROMIUMOS_WIDE_PROFILING_ADDRESS_MAPPER_H_
+#define CHROMIUMOS_WIDE_PROFILING_ADDRESS_MAPPER_H_
+
+#include <stdint.h>
+
+#include <list>
+
+namespace quipper {
+
+class AddressMapper {
+ public:
+  AddressMapper() {}
+
+  // Copy constructor: copies mappings from |source| to this AddressMapper. This
+  // is useful for copying mappings from parent to child process upon fork(). It
+  // is also useful to copy kernel mappings to any process that is created.
+  AddressMapper(const AddressMapper& source);
+
+  // Maps a new address range to quipper space.
+  // |remove_existing_mappings| indicates whether to remove old mappings that
+  // collide with the new range in real address space, indicating it has been
+  // unmapped.
+  // Returns true if mapping was successful.
+  bool Map(const uint64_t real_addr,
+           const uint64_t length,
+           bool remove_existing_mappings);
+
+  // Like Map(real_addr, length, remove_existing_mappings).  |id| is an
+  // identifier value to be stored along with the mapping.  AddressMapper does
+  // not care whether it is unique compared to all other IDs passed in.  That is
+  // up to the caller to keep track of.
+  // |offset_base| represents the offset within the original region at which the
+  // mapping begins. The original region can be much larger than the mapped
+  // region.
+  // e.g. Given a mapped region with base=0x4000 and size=0x2000 mapped with
+  // offset_base=0x10000, then the address 0x5000 maps to an offset of 0x11000
+  // (0x5000 - 0x4000 + 0x10000).
+  bool MapWithID(const uint64_t real_addr,
+                 const uint64_t length,
+                 const uint64_t id,
+                 const uint64_t offset_base,
+                 bool remove_existing_mappings);
+
+  // Looks up |real_addr| and returns the mapped address.
+  bool GetMappedAddress(const uint64_t real_addr, uint64_t* mapped_addr) const;
+
+  // Looks up |real_addr| and returns the mapping's ID and offset from the
+  // start of the mapped space.
+  bool GetMappedIDAndOffset(const uint64_t real_addr,
+                            uint64_t* id,
+                            uint64_t* offset) const;
+
+  // Returns true if there are no mappings.
+  bool IsEmpty() const {
+    return mappings_.empty();
+  }
+
+  // Returns the number of address ranges that are currently mapped.
+  unsigned int GetNumMappedRanges() const {
+    return mappings_.size();
+  }
+
+  // Returns the maximum length of quipper space containing mapped areas.
+  // There may be gaps in between blocks.
+  // If the result is 2^64 (all of quipper space), this returns 0.  Call
+  // IsEmpty() to distinguish this from actual emptiness.
+  uint64_t GetMaxMappedLength() const;
+
+  // Dumps the state of the address mapper to logs. Useful for debugging.
+  void DumpToLog() const;
+
+ private:
+  struct MappedRange {
+    uint64_t real_addr;
+    uint64_t mapped_addr;
+    uint64_t size;
+
+    uint64_t id;
+    uint64_t offset_base;
+
+    // Length of unmapped space after this range.
+    uint64_t unmapped_space_after;
+
+    // Determines if this range intersects another range in real space.
+    inline bool Intersects(const MappedRange& range) const {
+      return (real_addr <= range.real_addr + range.size - 1) &&
+             (real_addr + size - 1 >= range.real_addr);
+    }
+
+    // Determines if this range fully covers another range in real space.
+    inline bool Covers(const MappedRange& range) const {
+      return (real_addr <= range.real_addr) &&
+             (real_addr + size - 1 >= range.real_addr + range.size - 1);
+    }
+
+    // Determines if this range fully contains another range in real space.
+    // This is different from Covers() in that the boundaries cannot overlap.
+    inline bool Contains(const MappedRange& range) const {
+      return (real_addr < range.real_addr) &&
+             (real_addr + size - 1 > range.real_addr + range.size - 1);
+    }
+
+    // Determines if this range contains the given address |addr|.
+    inline bool ContainsAddress(uint64_t addr) const {
+      return (addr >= real_addr && addr <= real_addr + size - 1);
+    }
+  };
+
+  // TODO(sque): implement with set or map to improve searching.
+  typedef std::list<MappedRange> MappingList;
+
+  // Removes an existing address mapping.
+  // Returns true if successful, false if no mapped address range was found.
+  bool Unmap(const MappedRange& range);
+
+  // Container for all the existing mappings.
+  MappingList mappings_;
+
+  bool CheckMappings() const;
+};
+
+}  // namespace quipper
+
+#endif  // CHROMIUMOS_WIDE_PROFILING_ADDRESS_MAPPER_H_
diff --git a/perfprofd/quipper/base/basictypes.h b/perfprofd/quipper/base/basictypes.h
new file mode 100644
index 0000000..cec5bed
--- /dev/null
+++ b/perfprofd/quipper/base/basictypes.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains definitions of our old basic integral types
+// ((u)int{8,16,32,64}) and further includes. I recommend that you use the C99
+// standard types instead, and include <stdint.h>/<stddef.h>/etc. as needed.
+// Note that the macros and macro-like constructs that were formerly defined in
+// this file are now available separately in base/macros.h.
+
+#ifndef BASE_BASICTYPES_H_
+#define BASE_BASICTYPES_H_
+
+#include <limits.h>  // So we can set the bounds of our types.
+#include <stddef.h>  // For size_t.
+#include <stdint.h>  // For intptr_t.
+
+#include "quipper/base/macros.h"
+#include "quipper/base/port.h"  // Types that only need exist on certain systems.
+
+// DEPRECATED: Please use (u)int{8,16,32,64}_t instead (and include <stdint.h>).
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef int32_t int32;
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+
+// TODO(vtl): Figure out what's up with the 64-bit types. Can we just define them as
+// |int64_t|/|uint64_t|?
+// The NSPR system headers define 64-bit as |long| when possible, except on
+// Mac OS X.  In order to not have typedef mismatches, we do the same on LP64.
+//
+// On Mac OS X, |long long| is used for 64-bit types for compatibility with
+// <inttypes.h> format macros even in the LP64 model.
+#if defined(__LP64__) && !defined(OS_MACOSX) && !defined(OS_OPENBSD)
+typedef long int64;
+typedef unsigned long uint64;
+#else
+typedef long long int64;
+typedef unsigned long long uint64;
+#endif
+
+// DEPRECATED: Please use std::numeric_limits (from <limits>) instead.
+const uint8  kuint8max  =  0xFF;
+const uint16 kuint16max =  0xFFFF;
+const uint32 kuint32max =  0xFFFFFFFF;
+const uint64 kuint64max =  0xFFFFFFFFFFFFFFFFULL;
+const  int8  kint8min   = -0x7F - 1;
+const  int8  kint8max   =  0x7F;
+const  int16 kint16min  = -0x7FFF - 1;
+const  int16 kint16max  =  0x7FFF;
+const  int32 kint32min  = -0x7FFFFFFF - 1;
+const  int32 kint32max  =  0x7FFFFFFF;
+const  int64 kint64min  = -0x7FFFFFFFFFFFFFFFLL - 1;
+const  int64 kint64max  =  0x7FFFFFFFFFFFFFFFLL;
+
+#endif  // BASE_BASICTYPES_H_
diff --git a/perfprofd/quipper/base/compiler_specific.h b/perfprofd/quipper/base/compiler_specific.h
new file mode 100644
index 0000000..000c7d7
--- /dev/null
+++ b/perfprofd/quipper/base/compiler_specific.h
@@ -0,0 +1,208 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPILER_SPECIFIC_H_
+#define BASE_COMPILER_SPECIFIC_H_
+
+#include "quipper/build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))
+
+// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
+// The warning remains disabled until popped by MSVC_POP_WARNING.
+#define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
+                                     __pragma(warning(disable:n))
+
+// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level.  The level
+// remains in effect until popped by MSVC_POP_WARNING().  Use 0 to disable all
+// warnings.
+#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))
+
+// Pop effects of innermost MSVC_PUSH_* macro.
+#define MSVC_POP_WARNING() __pragma(warning(pop))
+
+#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
+#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) MSVC_SUPPRESS_WARNING(4275) \
+                                code
+
+#else  // Not MSVC
+
+#define MSVC_SUPPRESS_WARNING(n)
+#define MSVC_PUSH_DISABLE_WARNING(n)
+#define MSVC_PUSH_WARNING_LEVEL(n)
+#define MSVC_POP_WARNING()
+#define MSVC_DISABLE_OPTIMIZE()
+#define MSVC_ENABLE_OPTIMIZE()
+#define NON_EXPORTED_BASE(code) code
+
+#endif  // COMPILER_MSVC
+
+
+// The C++ standard requires that static const members have an out-of-class
+// definition (in a single compilation unit), but MSVC chokes on this (when
+// language extensions, which are required, are enabled). (You're only likely to
+// notice the need for a definition if you take the address of the member or,
+// more commonly, pass it to a function that takes it as a reference argument --
+// probably an STL function.) This macro makes MSVC do the right thing. See
+// http://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx for more
+// information. Use like:
+//
+// In .h file:
+//   struct Foo {
+//     static const int kBar = 5;
+//   };
+//
+// In .cc file:
+//   STATIC_CONST_MEMBER_DEFINITION const int Foo::kBar;
+#if defined(COMPILER_MSVC)
+#define STATIC_CONST_MEMBER_DEFINITION __declspec(selectany)
+#else
+#define STATIC_CONST_MEMBER_DEFINITION
+#endif
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+//   int x ALLOW_UNUSED = ...;
+#if defined(COMPILER_GCC)
+#define ALLOW_UNUSED __attribute__((unused))
+#else
+#define ALLOW_UNUSED
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+//   NOINLINE void DoStuff() { ... }
+#if defined(COMPILER_GCC)
+#define NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+//   class ALIGNAS(16) MyClass { ... }
+//   ALIGNAS(16) int array[4];
+#if defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Return the byte alignment of the given type (available at compile time).  Use
+// sizeof(type) prior to checking __alignof to workaround Visual C++ bug:
+// http://goo.gl/isH0C
+// Use like:
+//   ALIGNOF(int32)  // this would be 4
+#if defined(COMPILER_MSVC)
+#define ALIGNOF(type) (sizeof(type) - sizeof(type) + __alignof(type))
+#elif defined(COMPILER_GCC)
+#define ALIGNOF(type) __alignof__(type)
+#endif
+
+// Annotate a virtual method indicating it must be overriding a virtual
+// method in the parent class.
+// Use like:
+//   virtual void foo() OVERRIDE;
+#define OVERRIDE override
+
+// Annotate a virtual method indicating that subclasses must not override it,
+// or annotate a class to indicate that it cannot be subclassed.
+// Use like:
+//   virtual void foo() FINAL;
+//   class B FINAL : public A {};
+#define FINAL final
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+//   int foo() WARN_UNUSED_RESULT;
+// To explicitly ignore a result, see |ignore_result()| in <base/basictypes.h>.
+#if defined(COMPILER_GCC)
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+#if defined(COMPILER_GCC)
+#define PRINTF_FORMAT(format_param, dots_param) \
+    __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// WPRINTF_FORMAT is the same, but for wide format strings.
+// This doesn't appear to yet be implemented in any compiler.
+// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
+#define WPRINTF_FORMAT(format_param, dots_param)
+// If available, it would look like:
+//   __attribute__((format(wprintf, format_param, dots_param)))
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define MSAN_UNPOISON(p, s)  __msan_unpoison(p, s)
+#else  // MEMORY_SANITIZER
+#define MSAN_UNPOISON(p, s)
+#endif  // MEMORY_SANITIZER
+
+// Macro useful for writing cross-platform function pointers.
+#if !defined(CDECL)
+#if defined(OS_WIN)
+#define CDECL __cdecl
+#else  // defined(OS_WIN)
+#define CDECL
+#endif  // defined(OS_WIN)
+#endif  // !defined(CDECL)
+
+// Macro for hinting that an expression is likely to be false.
+#if !defined(UNLIKELY)
+#if defined(COMPILER_GCC)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(UNLIKELY)
+
+#endif  // BASE_COMPILER_SPECIFIC_H_
diff --git a/perfprofd/quipper/base/logging.cc b/perfprofd/quipper/base/logging.cc
new file mode 100644
index 0000000..cc73d28
--- /dev/null
+++ b/perfprofd/quipper/base/logging.cc
@@ -0,0 +1,110 @@
+//
+// Logging support functions. These are designed to mimic those used in
+// chromium_org/base in terms of interface, but to redirect error output to
+// the Android system log.
+//
+
+#include "quipper/base/logging.h"
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+#include <iomanip>
+#include <ostream>
+#include <string>
+
+#include <android/log.h>
+
+#define LOG_TAG "perf_reader"
+
+namespace logging {
+
+namespace {
+
+int min_log_level = 0;
+
+}
+
+void SetMinLogLevel(int level) {
+  min_log_level = std::min(LOG_FATAL, level);
+}
+
+int GetMinLogLevel() {
+  return min_log_level;
+}
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+// Explicit instantiations for commonly used comparisons.
+template std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+template std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* name);
+#endif
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+}
+
+LogMessage::LogMessage(const char* file, int line, std::string* result)
+    : severity_(LOG_FATAL), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result;
+  delete result;
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+                       std::string* result)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result;
+  delete result;
+}
+
+LogMessage::~LogMessage() {
+  stream_ << std::endl;
+  std::string str_newline(stream_.str());
+
+  android_LogPriority priority =
+      (severity_ < 0) ? ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN;
+  switch (severity_) {
+    case LOG_INFO:
+      priority = ANDROID_LOG_INFO;
+      break;
+    case LOG_WARNING:
+      priority = ANDROID_LOG_WARN;
+      break;
+    case LOG_ERROR:
+      priority = ANDROID_LOG_ERROR;
+      break;
+    case LOG_FATAL:
+      priority = ANDROID_LOG_FATAL;
+      break;
+  }
+  __android_log_write(priority, LOG_TAG, str_newline.c_str());
+
+  if (severity_ == LOG_FATAL) {
+    exit(9);
+  }
+}
+
+void LogMessage::Init(const char* /* file */, int /* line */) {
+}
+
+}  // namespace logging
diff --git a/perfprofd/quipper/base/logging.h b/perfprofd/quipper/base/logging.h
new file mode 100644
index 0000000..aaf01c1
--- /dev/null
+++ b/perfprofd/quipper/base/logging.h
@@ -0,0 +1,671 @@
+
+#ifndef BASE_LOGGING_H_
+#define BASE_LOGGING_H_
+
+#include <cassert>
+#include <string>
+#include <cstring>
+#include <sstream>
+
+#include "quipper/base/macros.h"
+#include "quipper/base/basictypes.h"
+
+//
+// Logging macros designed to mimic those used in chromium_org/base.
+//
+
+// Instructions
+// ------------
+//
+// Make a bunch of macros for logging.  The way to log things is to stream
+// things to LOG(<a particular severity level>).  E.g.,
+//
+//   LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can also do conditional logging:
+//
+//   LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// The CHECK(condition) macro is active in both debug and release builds and
+// effectively performs a LOG(FATAL) which terminates the process and
+// generates a crashdump unless a debugger is attached.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+//   DLOG(INFO) << "Found cookies";
+//
+//   DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles.  LOG_IF and development flags also work well together
+// because the code can be compiled away sometimes.
+//
+// We also have
+//
+//   LOG_ASSERT(assertion);
+//   DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros.  They look like
+//
+//   VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+//   VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+// The verbose logging can also be turned on module-by-module.  For instance,
+//    --vmodule=profile=2,icon_loader=1,browser_*=3,*/chromeos/*=4 --v=0
+// will cause:
+//   a. VLOG(2) and lower messages to be printed from profile.{h,cc}
+//   b. VLOG(1) and lower messages to be printed from icon_loader.{h,cc}
+//   c. VLOG(3) and lower messages to be printed from files prefixed with
+//      "browser"
+//   d. VLOG(4) and lower messages to be printed from files under a
+//     "chromeos" directory.
+//   e. VLOG(0) and lower messages to be printed from elsewhere
+//
+// The wildcarding functionality shown by (c) supports both '*' (match
+// 0 or more characters) and '?' (match any single character)
+// wildcards.  Any pattern containing a forward or backward slash will
+// be tested against the whole pathname and not just the module.
+// E.g., "*/foo/bar/*=2" would change the logging level for all code
+// in source files under a "foo/bar" directory.
+//
+// There's also VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+//
+//   if (VLOG_IS_ON(2)) {
+//     // do some logging preparation and logging
+//     // that can't be accomplished with just VLOG(2) << ...;
+//   }
+//
+// There is also a VLOG_IF "verbose level" condition macro for sample
+// cases, when some extra computation and preparation for logs is not
+// needed.
+//
+//   VLOG_IF(1, (size > 1024))
+//      << "I'm printed when size is more than 1024 and when you run the "
+//         "program with --v=1 or more";
+//
+// We also override the standard 'assert' to use 'DLOG_ASSERT'.
+//
+// Lastly, there is:
+//
+//   PLOG(ERROR) << "Couldn't do foo";
+//   DPLOG(ERROR) << "Couldn't do foo";
+//   PLOG_IF(ERROR, cond) << "Couldn't do foo";
+//   DPLOG_IF(ERROR, cond) << "Couldn't do foo";
+//   PCHECK(condition) << "Couldn't do foo";
+//   DPCHECK(condition) << "Couldn't do foo";
+//
+// which append the last system error to the message in string form (taken from
+// GetLastError() on Windows and errno on POSIX).
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// There is the special severity of DFATAL, which logs FATAL in debug mode,
+// ERROR in normal mode.
+
+#define BASE_EXPORT
+
+namespace logging {
+
+// Sets the log level. Anything at or above this level will be written to the
+// log file/displayed to the user (if applicable). Anything below this level
+// will be silently ignored. The log level defaults to 0 (everything is logged
+// up to level INFO) if this function is not called.
+// Note that log messages for VLOG(x) are logged at level -x, so setting
+// the min log level to negative values enables verbose logging.
+BASE_EXPORT void SetMinLogLevel(int level);
+
+// Gets the current log level.
+BASE_EXPORT int GetMinLogLevel();
+
+// Gets the VLOG default verbosity level.
+BASE_EXPORT int GetVlogVerbosity();
+
+typedef int LogSeverity;
+const LogSeverity LOG_VERBOSE = -1;  // This is level 1 verbosity
+// Note: the log severities are used to index into the array of names,
+// see log_severity_names.
+const LogSeverity LOG_INFO = 0;
+const LogSeverity LOG_WARNING = 1;
+const LogSeverity LOG_ERROR = 2;
+const LogSeverity LOG_FATAL = 3;
+const LogSeverity LOG_NUM_SEVERITIES = 4;
+
+// A few definitions of macros that don't generate much code. These are used
+// by LOG() and LOG_IF, etc. Since these are used all over our code, it's
+// better to have compact code for these operations.
+#define COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_INFO , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_WARNING , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_ERROR , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_FATAL , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_DFATAL , ##__VA_ARGS__)
+
+#define COMPACT_GOOGLE_LOG_INFO \
+  COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
+#define COMPACT_GOOGLE_LOG_WARNING \
+  COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
+#define COMPACT_GOOGLE_LOG_ERROR \
+  COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
+#define COMPACT_GOOGLE_LOG_FATAL \
+  COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
+#define COMPACT_GOOGLE_LOG_DFATAL \
+  COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
+
+// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
+// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
+// always fire if they fail.
+#define LOG_IS_ON(severity) \
+  ((::logging::LOG_ ## severity) >= ::logging::GetMinLogLevel())
+
+#define VLOG_IS_ON(verboselevel) false
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold.
+#define LAZY_STREAM(stream, condition)                                  \
+  !(condition) ? (void) 0 : ::logging::LogMessageVoidify() & (stream) /* NOLINT */
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// LOG(INFO) becomes the token COMPACT_GOOGLE_LOG_INFO.  There's some funny
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define LOG_STREAM(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+
+#define LOG(severity) LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity))
+#define LOG_IF(severity, condition) \
+  LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+// The VLOG macros log with negative verbosities.
+#define VLOG_STREAM(verbose_level) \
+  logging::LogMessage(__FILE__, __LINE__, -(verbose_level)).stream()
+
+#define VLOG(verbose_level) \
+  LAZY_STREAM(VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VLOG_IF(verbose_level, condition) \
+  LAZY_STREAM(VLOG_STREAM(verbose_level), \
+      VLOG_IS_ON(verbose_level) && (condition))
+
+// TODO(akalin): Add more VLOG variants, e.g. VPLOG.
+
+#define LOG_ASSERT(condition)  \
+  LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
+#define SYSLOG_ASSERT(condition) \
+  SYSLOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
+
+#define PLOG(severity)                                          \
+  LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity))
+
+#define PLOG_IF(severity, condition) \
+  LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+// The actual stream used isn't important.
+#define EAT_STREAM_PARAMETERS                                           \
+  true ? (void) 0 : ::logging::LogMessageVoidify() & LOG_STREAM(FATAL) /* NOLINT */
+
+// CHECK dies with a fatal error if condition is not true.  It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode.
+//
+// We make sure CHECK et al. always evaluates their arguments, as
+// doing CHECK(FunctionWithSideEffect()) is a common idiom.
+
+#define CHECK(condition)                       \
+  LAZY_STREAM(LOG_STREAM(FATAL), !(condition)) \
+  << "Check failed: " #condition ". "
+
+#define PCHECK(condition) \
+  LAZY_STREAM(PLOG_STREAM(FATAL), !(condition)) \
+  << "Check failed: " #condition ". "
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+//
+// TODO(akalin): Rewrite this so that constructs like if (...)
+// CHECK_EQ(...) else { ... } work properly.
+#define CHECK_OP(name, op, val1, val2)                          \
+  if (std::string* _result =                                    \
+      logging::Check##name##Impl((val1), (val2),                \
+                                 #val1 " " #op " " #val2))      \
+    logging::LogMessage(__FILE__, __LINE__, _result).stream()
+
+// Build the error message string.  This is separate from the "Impl"
+// function template because it is not performance critical and so can
+// be out of line, while the "Impl" code should be inline.  Caller
+// takes ownership of the returned string.
+template<class t1, class t2>
+std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
+  std::ostringstream ss;
+  ss << names << " (" << v1 << " vs. " << v2 << ")";
+  std::string* msg = new std::string(ss.str());
+  return msg;
+}
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
+// in logging.cc.
+extern template BASE_EXPORT std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* name);
+#endif
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+  template <class t1, class t2> \
+  inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+                                        const char* names) { \
+    if (v1 op v2) return NULL; \
+    else return MakeCheckOpString(v1, v2, names); \
+  } \
+  inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
+    if (v1 op v2) return NULL; \
+    else return MakeCheckOpString(v1, v2, names); \
+  }
+DEFINE_CHECK_OP_IMPL(EQ, ==)
+DEFINE_CHECK_OP_IMPL(NE, !=)
+DEFINE_CHECK_OP_IMPL(LE, <=)
+DEFINE_CHECK_OP_IMPL(LT, < )
+DEFINE_CHECK_OP_IMPL(GE, >=)
+DEFINE_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
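+
+// For illustration (hypothetical example; |buffer_size| and |bytes_needed|
+// are arbitrary size variables):
+//
+//   CHECK_EQ(2 + 2, 4);
+//   CHECK_GE(buffer_size, bytes_needed) << "buffer too small";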
+
+#if defined(NDEBUG)
+#define ENABLE_DLOG 0
+#else
+#define ENABLE_DLOG 1
+#endif
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define DCHECK_IS_ON 0
+#else
+#define DCHECK_IS_ON 1
+#endif
+
+// Definitions for DLOG et al.
+
+#if ENABLE_DLOG
+
+#define DLOG_IS_ON(severity) LOG_IS_ON(severity)
+#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+#define DPLOG_IF(severity, condition) PLOG_IF(severity, condition)
+#define DVLOG_IF(verboselevel, condition) VLOG_IF(verboselevel, condition)
+#define DVPLOG_IF(verboselevel, condition) VPLOG_IF(verboselevel, condition)
+
+#else  // ENABLE_DLOG
+
+// If ENABLE_DLOG is off, we want to avoid emitting any references to
+// |condition| (which may reference a variable defined only if NDEBUG
+// is not defined).  Contrast this with DCHECK et al., which have
+// different behavior.
+
+#define DLOG_IS_ON(severity) false
+#define DLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DLOG_ASSERT(condition) EAT_STREAM_PARAMETERS
+#define DPLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DVLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+#define DVPLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+
+#endif  // ENABLE_DLOG
+
+// DEBUG_MODE is for uses like
+//   if (DEBUG_MODE) foo.CheckThatFoo();
+// instead of
+//   #ifndef NDEBUG
+//     foo.CheckThatFoo();
+//   #endif
+//
+// We tie its state to ENABLE_DLOG.
+enum { DEBUG_MODE = ENABLE_DLOG };
+
+#undef ENABLE_DLOG
+
+#define DLOG(severity)                                          \
+  LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DPLOG(severity)                                         \
+  LAZY_STREAM(PLOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DVLOG(verboselevel) DVLOG_IF(verboselevel, VLOG_IS_ON(verboselevel))
+
+#define DVPLOG(verboselevel) DVPLOG_IF(verboselevel, VLOG_IS_ON(verboselevel))
+
+// Definitions for DCHECK et al.
+
+#if DCHECK_IS_ON
+
+#define COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+  COMPACT_GOOGLE_LOG_EX_FATAL(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_DCHECK COMPACT_GOOGLE_LOG_FATAL
+const LogSeverity LOG_DCHECK = LOG_FATAL;
+
+#else  // DCHECK_IS_ON
+
+// These are just dummy values.
+#define COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+  COMPACT_GOOGLE_LOG_EX_INFO(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_DCHECK COMPACT_GOOGLE_LOG_INFO
+const LogSeverity LOG_DCHECK = LOG_INFO;
+
+#endif  // DCHECK_IS_ON
+
+// DCHECK et al. make sure to reference |condition| regardless of
+// whether DCHECKs are enabled; this is so that we don't get unused
+// variable warnings if the only use of a variable is in a DCHECK.
+// This behavior is different from DLOG_IF et al.
+
+#define DCHECK(condition)                                         \
+  LAZY_STREAM(LOG_STREAM(DCHECK), DCHECK_IS_ON && !(condition))   \
+  << "Check failed: " #condition ". "
+
+#define DPCHECK(condition)                                        \
+  LAZY_STREAM(PLOG_STREAM(DCHECK), DCHECK_IS_ON && !(condition))  \
+  << "Check failed: " #condition ". "
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use DCHECK_EQ et al below.
+#define DCHECK_OP(name, op, val1, val2)                         \
+  if (DCHECK_IS_ON)                                             \
+    if (std::string* _result =                                  \
+        logging::Check##name##Impl((val1), (val2),              \
+                                   #val1 " " #op " " #val2))    \
+      logging::LogMessage(                                      \
+          __FILE__, __LINE__, ::logging::LOG_DCHECK,            \
+          _result).stream()
+
+// Equality/Inequality checks - compare two values, and log a
+// LOG_DCHECK message including the two values when the result is not
+// as expected.  The values must have operator<<(ostream, ...)
+// defined.
+//
+// You may append to the error message like so:
+//   DCHECK_NE(1, 2) << ": The world must be ending!";
+//
+// We are very careful to ensure that each argument is evaluated exactly
+// once, and that anything which is legal to pass as a function argument is
+// legal here.  In particular, the arguments may be temporary expressions
+// which will end up being destroyed at the end of the apparent statement,
+// for example:
+//   DCHECK_EQ(string("abc")[1], 'b');
+//
+// WARNING: These may not compile correctly if one of the arguments is a pointer
+// and the other is NULL. To work around this, simply static_cast NULL to the
+// type of the desired pointer.
+
+#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
+#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
+#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2)
+#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2)
+#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2)
+#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2)
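+
+// For illustration of the NULL workaround mentioned above (hypothetical
+// example; |foo| is an arbitrary Foo*):
+//
+//   DCHECK_EQ(foo, static_cast<Foo*>(NULL));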
+
+#if defined(NDEBUG) && defined(OS_CHROMEOS)
+#define NOTREACHED() LOG(ERROR) << "NOTREACHED() hit in " << \
+    __FUNCTION__ << ". "
+#else
+#define NOTREACHED() DCHECK(false)
+#endif
+
+// Redefine the standard assert to use our nice log files
+#undef assert
+#define assert(x) DLOG_ASSERT(x)
+
+// This class more or less represents a particular log message.  You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though.  You should use the LOG() macro (and variants thereof)
+// above.
+class BASE_EXPORT LogMessage {
+ public:
+  // Used for LOG(severity).
+  LogMessage(const char* file, int line, LogSeverity severity);
+
+  // Used for CHECK_EQ(), etc. Takes ownership of the given string.
+  // Implied severity = LOG_FATAL.
+  LogMessage(const char* file, int line, std::string* result);
+
+  // Used for DCHECK_EQ(), etc. Takes ownership of the given string.
+  LogMessage(const char* file, int line, LogSeverity severity,
+             std::string* result);
+
+  ~LogMessage();
+
+  std::ostream& stream() { return stream_; }
+
+ private:
+  void Init(const char* file, int line);
+
+  LogSeverity severity_;
+  std::ostringstream stream_;
+  size_t message_start_;  // Offset of the start of the message (past prefix
+                          // info).
+  // The file and line information passed in to the constructor.
+  const char* file_;
+  const int line_;
+
+#if defined(OS_WIN)
+  // Stores the current value of GetLastError in the constructor and restores
+  // it in the destructor by calling SetLastError.
+  // This is useful since the LogMessage class uses a lot of Win32 calls
+  // that will lose the value of GLE and the code that called the log function
+  // will have lost the thread error value when the log call returns.
+  class SaveLastError {
+   public:
+    SaveLastError();
+    ~SaveLastError();
+
+    unsigned long get_error() const { return last_error_; }
+
+   protected:
+    unsigned long last_error_;
+  };
+
+  SaveLastError last_error_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(LogMessage);
+};
+
+// A non-macro interface to the log facility, useful
+// when the logging level is not a compile-time constant.
+inline void LogAtLevel(int const log_level, std::string const &msg) {
+  LogMessage(__FILE__, __LINE__, log_level).stream() << msg;
+}
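+
+// For illustration (hypothetical example; |level| comes from a runtime
+// configuration value rather than a compile-time constant):
+//
+//   logging::LogAtLevel(level, "profile collection started");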
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros.  This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LogMessageVoidify {
+ public:
+  LogMessageVoidify() { }
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(std::ostream&) { }
+};
+
+#if defined(OS_WIN)
+typedef unsigned long SystemErrorCode;
+#elif defined(OS_POSIX)
+typedef int SystemErrorCode;
+#endif
+
+// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
+// pull in windows.h just for GetLastError() and DWORD.
+BASE_EXPORT SystemErrorCode GetLastSystemErrorCode();
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code);
+
+#if defined(OS_WIN)
+// Appends a formatted system message of the GetLastError() type.
+class BASE_EXPORT Win32ErrorLogMessage {
+ public:
+  Win32ErrorLogMessage(const char* file,
+                       int line,
+                       LogSeverity severity,
+                       SystemErrorCode err);
+
+  // Appends the error message before destructing the encapsulated class.
+  ~Win32ErrorLogMessage();
+
+  std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+  SystemErrorCode err_;
+  LogMessage log_message_;
+
+  DISALLOW_COPY_AND_ASSIGN(Win32ErrorLogMessage);
+};
+#elif defined(OS_POSIX)
+// Appends a formatted system message of the errno type
+class BASE_EXPORT ErrnoLogMessage {
+ public:
+  ErrnoLogMessage(const char* file,
+                  int line,
+                  LogSeverity severity,
+                  SystemErrorCode err);
+
+  // Appends the error message before destructing the encapsulated class.
+  ~ErrnoLogMessage();
+
+  std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+  SystemErrorCode err_;
+  LogMessage log_message_;
+
+  DISALLOW_COPY_AND_ASSIGN(ErrnoLogMessage);
+};
+#endif  // OS_WIN
+
+// Closes the log file explicitly if open.
+// NOTE: Since the log file is opened as necessary by the action of logging
+//       statements, there's no guarantee that it will stay closed
+//       after this call.
+BASE_EXPORT void CloseLogFile();
+
+// Async signal safe logging mechanism.
+BASE_EXPORT void RawLog(int level, const char* message);
+
+#define RAW_LOG(level, message) logging::RawLog(logging::LOG_ ## level, message)
+
+#define RAW_CHECK(condition)                                                   \
+  do {                                                                         \
+    if (!(condition))                                                          \
+      logging::RawLog(logging::LOG_FATAL, "Check failed: " #condition "\n");   \
+  } while (0)
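+
+// For illustration, these are meant for places such as signal handlers where
+// ordinary logging (which may allocate or lock) is unsafe (hypothetical
+// example; |fd| is an arbitrary file descriptor):
+//
+//   RAW_LOG(ERROR, "caught SIGSEGV");
+//   RAW_CHECK(fd >= 0);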
+
+#if defined(OS_WIN)
+// Returns the default log file path.
+BASE_EXPORT std::wstring GetLogFileFullPath();
+#endif
+
+}  // namespace logging
+
+// Note that "The behavior of a C++ program is undefined if it adds declarations
+// or definitions to namespace std or to a namespace within namespace std unless
+// otherwise specified." --C++11[namespace.std]
+//
+// We've checked that this particular definition has the intended behavior on
+// our implementations, but it's prone to breaking in the future, and please
+// don't imitate this in your own definitions without checking with some
+// standard library experts.
+namespace std {
+// These functions are provided as a convenience for logging, which is where we
+// use streams (it is against Google style to use streams in other places).
+// They are designed to allow you to emit non-ASCII Unicode strings to the log
+// file, which is normally ASCII. They are relatively slow, so try not to use
+// them for common cases. Non-ASCII characters will be converted to UTF-8 by
+// these operators.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
+inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
+  return out << wstr.c_str();
+}
+}  // namespace std
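+
+// For illustration (hypothetical example):
+//
+//   std::wstring device_name = L"pixel";
+//   LOG(INFO) << "device: " << device_name;  // uses the operator above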
+
+// The NOTIMPLEMENTED() macro annotates codepaths which have
+// not been implemented yet.
+//
+// The implementation of this macro is controlled by NOTIMPLEMENTED_POLICY:
+//   0 -- Do nothing (stripped by compiler)
+//   1 -- Warn at compile time
+//   2 -- Fail at compile time
+//   3 -- Fail at runtime (DCHECK)
+//   4 -- [default] LOG(ERROR) at runtime
+//   5 -- LOG(ERROR) at runtime, only once per call-site
+
+#ifndef NOTIMPLEMENTED_POLICY
+#if defined(OS_ANDROID) && defined(OFFICIAL_BUILD)
+#define NOTIMPLEMENTED_POLICY 0
+#else
+// WebView: Hide NOTIMPLEMENTED entirely in Android release branch.
+#define NOTIMPLEMENTED_POLICY 0
+#endif
+#endif
+
+#if defined(COMPILER_GCC)
+// On Linux, with GCC, we can use __PRETTY_FUNCTION__ to get the demangled name
+// of the current function in the NOTIMPLEMENTED message.
+#define NOTIMPLEMENTED_MSG "Not implemented reached in " << __PRETTY_FUNCTION__
+#else
+#define NOTIMPLEMENTED_MSG "NOT IMPLEMENTED"
+#endif
+
+#if NOTIMPLEMENTED_POLICY == 0
+#define NOTIMPLEMENTED() EAT_STREAM_PARAMETERS
+#elif NOTIMPLEMENTED_POLICY == 1
+// TODO: figure out how to generate a warning
+#define NOTIMPLEMENTED() COMPILE_ASSERT(false, NOT_IMPLEMENTED)
+#elif NOTIMPLEMENTED_POLICY == 2
+#define NOTIMPLEMENTED() COMPILE_ASSERT(false, NOT_IMPLEMENTED)
+#elif NOTIMPLEMENTED_POLICY == 3
+#define NOTIMPLEMENTED() NOTREACHED()
+#elif NOTIMPLEMENTED_POLICY == 4
+#define NOTIMPLEMENTED() LOG(ERROR) << NOTIMPLEMENTED_MSG
+#elif NOTIMPLEMENTED_POLICY == 5
+#define NOTIMPLEMENTED() do {\
+  static bool logged_once = false;\
+  LOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG;\
+  logged_once = true;\
+} while(0);\
+EAT_STREAM_PARAMETERS
+#endif
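+
+// For illustration, a build can select a different policy by defining
+// NOTIMPLEMENTED_POLICY before this header is included, typically via a
+// -D compiler flag (hypothetical example):
+//
+//   // compiled with -DNOTIMPLEMENTED_POLICY=5:
+//   void Codec::Flush() { NOTIMPLEMENTED(); }  // logs once per call site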
+
+#endif  // BASE_LOGGING_H_
diff --git a/perfprofd/quipper/base/macros.h b/perfprofd/quipper/base/macros.h
new file mode 100644
index 0000000..be14792
--- /dev/null
+++ b/perfprofd/quipper/base/macros.h
@@ -0,0 +1,257 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros and macro-like constructs (e.g., templates) that
+// are commonly used throughout Chromium source. (It may also contain closely
+// related things that belong in this file.)
+
+#ifndef BASE_MACROS_H_
+#define BASE_MACROS_H_
+
+#include <stddef.h>  // For size_t.
+#include <string.h>  // For memcpy.
+
+#include "quipper/base/compiler_specific.h"  // For ALLOW_UNUSED.
+
+// Put this in the private: declarations for a class to be uncopyable.
+#define DISALLOW_COPY(TypeName) \
+  TypeName(const TypeName&)
+
+// Put this in the private: declarations for a class to be unassignable.
+#define DISALLOW_ASSIGN(TypeName) \
+  void operator=(const TypeName&)
+
+// A macro to disallow the copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&);               \
+  void operator=(const TypeName&)
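+
+// For illustration (hypothetical example):
+//
+//   class Connection {
+//    public:
+//     Connection();
+//    private:
+//     DISALLOW_COPY_AND_ASSIGN(Connection);
+//   };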
+
+// An older, deprecated, politically incorrect name for the above.
+// NOTE: The usage of this macro was banned from our code base, but some
+// third_party libraries are still using it.
+// TODO(tfarina): Figure out how to fix the usage of this macro in the
+// third_party libraries and get rid of it.
+#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+  TypeName();                                    \
+  DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example.  If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function.  In these rare
+// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below.  This is
+// due to a limitation in C++'s template system.  The limitation might
+// eventually be removed, but it hasn't happened yet.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef _MSC_VER
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
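+
+// For illustration (hypothetical example):
+//
+//   static const int kPrimes[] = {2, 3, 5, 7, 11};
+//   int scratch[arraysize(kPrimes)];  // usable as a compile-time constant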
+
+// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions.  It's less safe than arraysize as it accepts some
+// (although not all) pointers.  Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE_UNSAFE catches a few type errors.  If you see a compiler error
+//
+//   "warning: division by zero in ..."
+//
+// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element).  If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array.  Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size.  Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is not 1, 2, or 4 bytes will be (righteously) rejected.
+
+#define ARRAYSIZE_UNSAFE(a) \
+  ((sizeof(a) / sizeof(*(a))) / \
+   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
+// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
+// a const pointer to Foo).
+// When you use implicit_cast, the compiler checks that the cast is safe.
+// Such explicit implicit_casts are necessary in surprisingly many
+// situations where C++ demands an exact type match instead of an
+// argument type convertible to a target type.
+//
+// The From type can be inferred, so the preferred syntax for using
+// implicit_cast is the same as for static_cast etc.:
+//
+//   implicit_cast<ToType>(expr)
+//
+// implicit_cast would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
+template<typename To, typename From>
+inline To implicit_cast(From const &f) {
+  return f;
+}
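+
+// For illustration (hypothetical example; Foo and SuperclassOfFoo are as in
+// the comment above):
+//
+//   Foo* foo = ...;
+//   const Foo* const_foo = implicit_cast<const Foo*>(foo);
+//   SuperclassOfFoo* base = implicit_cast<SuperclassOfFoo*>(foo);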
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+#undef COMPILE_ASSERT
+#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+
+// bit_cast<Dest,Source> is a template function that implements the
+// equivalent of "*reinterpret_cast<Dest*>(&source)".  We need this in
+// very low-level functions like the protobuf library and fast math
+// support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior
+// according to ISO C++ specification section 3.10 -15-.  Roughly, this
+// section says: if an object in memory has one type, and a program
+// accesses it with a different type, then the result is undefined
+// behavior for most values of "different type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for
+// conversions between integral lvalues and floating-point lvalues.
+//
+// The purpose of 3.10 -15- is to allow optimizing compilers to assume
+// that expressions with different types refer to different memory.  gcc
+// 4.0.1 has an optimizer that takes advantage of this.  So a
+// non-conforming program quietly produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type
+// punning: holding an object in memory of one type and reading its bits
+// back using a different type.
+//
+// The C++ standard is more subtle and complex than this, but that
+// is the basic idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9 .  Also, of course,
+// bit_cast<> wraps up the nasty logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement.  On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+//
+// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
+//
+// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
+// is likely to surprise you.
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+// Used to explicitly mark the return value of a function as unused. If you are
+// really sure you don't want to do anything with the return value of a function
+// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
+//
+//   scoped_ptr<MyType> my_var = ...;
+//   if (TakeOwnership(my_var.get()) == SUCCESS)
+//     ignore_result(my_var.release());
+//
+template<typename T>
+inline void ignore_result(const T&) {
+}
+
+// The following enum should be used only as a constructor argument to indicate
+// that the variable has static storage class, and that the constructor should
+// do nothing to its state.  It indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the base::LINKER_INITIALIZED argument.  Normally, it is unsafe to declare a
+// static variable that has a constructor or a destructor because invocation
+// order is undefined.  However, IF the type can be initialized by filling with
+// zeroes (which the loader does for static variables), AND the destructor also
+// does nothing to the storage, AND there are no virtual methods, then a
+// constructor declared as
+//       explicit MyClass(base::LinkerInitialized x) {}
+// and invoked as
+//       static MyClass my_variable_name(base::LINKER_INITIALIZED);
+// is safe.
+namespace base {
+enum LinkerInitialized { LINKER_INITIALIZED };
+
+// Use these to declare and define a static local variable (static T;) so that
+// it is intentionally leaked and its destructor is not called at exit. If you
+// need thread-safe initialization, use base/lazy_instance.h instead.
+#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
+  static type& name = *new type arguments /* NOLINT */
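+
+// For illustration (hypothetical example; the path is arbitrary):
+//
+//   const std::string& DefaultOutputDir() {
+//     CR_DEFINE_STATIC_LOCAL(std::string, dir, ("/data/misc/perfprofd"));
+//     return dir;
+//   }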
+
+}  // base
+
+#endif  // BASE_MACROS_H_
diff --git a/perfprofd/quipper/base/port.h b/perfprofd/quipper/base/port.h
new file mode 100644
index 0000000..58f4969
--- /dev/null
+++ b/perfprofd/quipper/base/port.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PORT_H_
+#define BASE_PORT_H_
+
+#include <stdarg.h>
+#include "quipper/build/build_config.h"
+
+// DEPRECATED: Use ...LL and ...ULL suffixes.
+// TODO(viettrungluu): Delete these. These are only here until |GG_(U)INT64_C|
+// are deleted (some other header files (re)define |GG_(U)INT64_C|, so our
+// definitions of them must exactly match theirs).
+#ifdef COMPILER_MSVC
+#define GG_LONGLONG(x) x##I64
+#define GG_ULONGLONG(x) x##UI64
+#else
+#define GG_LONGLONG(x) x##LL
+#define GG_ULONGLONG(x) x##ULL
+#endif
+
+// DEPRECATED: In Chromium, we force-define __STDC_CONSTANT_MACROS, so you can
+// just use the regular (U)INTn_C macros from <stdint.h>.
+// TODO(viettrungluu): Remove the remaining GG_(U)INTn_C macros.
+#define GG_INT64_C(x)   GG_LONGLONG(x)
+#define GG_UINT64_C(x)  GG_ULONGLONG(x)
+
+// It's possible for functions that use a va_list, such as StringPrintf, to
+// invalidate the data in it upon use.  The fix is to make a copy of the
+// structure before using it and use that copy instead.  va_copy is provided
+// for this purpose.  MSVC does not provide va_copy, so define an
+// implementation here.  It is not guaranteed that assignment is a copy, so the
+// StringUtil.VariableArgsFunc unit test tests this capability.
+#if defined(COMPILER_GCC)
+#define GG_VA_COPY(a, b) (va_copy(a, b))
+#elif defined(COMPILER_MSVC)
+#define GG_VA_COPY(a, b) (a = b)
+#endif
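+
+// For illustration (hypothetical example):
+//
+//   void LogTwice(const char* format, va_list ap) {
+//     va_list ap_copy;
+//     GG_VA_COPY(ap_copy, ap);
+//     vfprintf(stderr, format, ap);       // may invalidate |ap|
+//     vfprintf(stderr, format, ap_copy);  // safe: uses the copy
+//     va_end(ap_copy);
+//   }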
+
+// Define an OS-neutral wrapper for shared library entry points
+#if defined(OS_WIN)
+#define API_CALL __stdcall
+#else
+#define API_CALL
+#endif
+
+#endif  // BASE_PORT_H_
diff --git a/perfprofd/quipper/build/build_config.h b/perfprofd/quipper/build/build_config.h
new file mode 100644
index 0000000..5534846
--- /dev/null
+++ b/perfprofd/quipper/build/build_config.h
@@ -0,0 +1,159 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file adds defines about the platform we're currently building on.
+//  Operating System:
+//    OS_WIN / OS_MACOSX / OS_LINUX / OS_POSIX (MACOSX or LINUX) / OS_NACL
+//  Compiler:
+//    COMPILER_MSVC / COMPILER_GCC
+//  Processor:
+//    ARCH_CPU_X86 / ARCH_CPU_X86_64 / ARCH_CPU_X86_FAMILY (X86 or X86_64)
+//    ARCH_CPU_32_BITS / ARCH_CPU_64_BITS
+
+#ifndef BUILD_BUILD_CONFIG_H_
+#define BUILD_BUILD_CONFIG_H_
+
+// A set of macros to use for platform detection.
+#if defined(__native_client__)
+// __native_client__ must be first, so that other OS_ defines are not set.
+#define OS_NACL 1
+#elif defined(ANDROID)
+#define OS_ANDROID 1
+#elif defined(__APPLE__)
+// Only include TargetConditionals.h after testing ANDROID, as some Android
+// builds on Mac don't have this header available and it's not needed unless
+// the target is really Mac/iOS.
+#include <TargetConditionals.h>
+#define OS_MACOSX 1
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#define OS_IOS 1
+#endif  // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#elif defined(__linux__)
+#define OS_LINUX 1
+// include a system header to pull in features.h for glibc/uclibc macros.
+#include <unistd.h>
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+// we really are using glibc, not uClibc pretending to be glibc
+#define LIBC_GLIBC 1
+#endif
+#elif defined(_WIN32)
+#define OS_WIN 1
+#define TOOLKIT_VIEWS 1
+#elif defined(__FreeBSD__)
+#define OS_FREEBSD 1
+#elif defined(__OpenBSD__)
+#define OS_OPENBSD 1
+#elif defined(__sun)
+#define OS_SOLARIS 1
+#elif defined(__QNXNTO__)
+#define OS_QNX 1
+#else
+#error Please add support for your platform in build/build_config.h
+#endif
+
+#if defined(USE_OPENSSL) && defined(USE_NSS)
+#error Cannot use both OpenSSL and NSS
+#endif
+
+// For access to standard BSD features, use OS_BSD instead of a
+// more specific macro.
+#if defined(OS_FREEBSD) || defined(OS_OPENBSD)
+#define OS_BSD 1
+#endif
+
+// For access to standard POSIXish features, use OS_POSIX instead of a
+// more specific macro.
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_FREEBSD) ||     \
+    defined(OS_OPENBSD) || defined(OS_SOLARIS) || defined(OS_ANDROID) ||  \
+    defined(OS_NACL) || defined(OS_QNX)
+#define OS_POSIX 1
+#endif
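+
+// For illustration, client code typically branches on these macros
+// (hypothetical example):
+//
+//   #if defined(OS_POSIX)
+//   typedef int NativeFileHandle;
+//   #elif defined(OS_WIN)
+//   typedef HANDLE NativeFileHandle;
+//   #endif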
+
+// Use tcmalloc
+#if (defined(OS_WIN) || defined(OS_LINUX) || defined(OS_ANDROID)) && \
+    !defined(NO_TCMALLOC)
+#define USE_TCMALLOC 1
+#endif
+
+// Compiler detection.
+#if defined(__GNUC__)
+#define COMPILER_GCC 1
+#elif defined(_MSC_VER)
+#define COMPILER_MSVC 1
+#else
+#error Please add support for your compiler in build/build_config.h
+#endif
+
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define ARCH_CPU_X86_FAMILY 1
+#define ARCH_CPU_X86_64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define ARCH_CPU_X86_FAMILY 1
+#define ARCH_CPU_X86 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__ARMEL__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARMEL 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__aarch64__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARM64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__pnacl__)
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__MIPSEL__)
+#if defined(__LP64__)
+#define ARCH_CPU_MIPS64_FAMILY 1
+#define ARCH_CPU_MIPS64EL 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#else
+#define ARCH_CPU_MIPS_FAMILY 1
+#define ARCH_CPU_MIPSEL 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#endif
+#else
+#error Please add support for your architecture in build/build_config.h
+#endif
+
+// Type detection for wchar_t.
+#if defined(OS_WIN)
+#define WCHAR_T_IS_UTF16
+#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
+    defined(__WCHAR_MAX__) && \
+    (__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
+#define WCHAR_T_IS_UTF32
+#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
+    defined(__WCHAR_MAX__) && \
+    (__WCHAR_MAX__ == 0x7fff || __WCHAR_MAX__ == 0xffff)
+// On Posix, we'll detect short wchar_t, but projects aren't guaranteed to
+// compile in this mode (in particular, Chrome doesn't). This is intended for
+// other projects using base who manage their own dependencies and make sure
+// short wchar works for them.
+#define WCHAR_T_IS_UTF16
+#else
+#error Please add support for your compiler in build/build_config.h
+#endif
+
+#if defined(OS_ANDROID)
+// The compiler thinks std::string::const_iterator and "const char*" are
+// equivalent types.
+#define STD_STRING_ITERATOR_IS_CHAR_POINTER
+// The compiler thinks base::string16::const_iterator and "char16*" are
+// equivalent types.
+#define BASE_STRING16_ITERATOR_IS_CHAR16_POINTER
+#endif
+
+#endif  // BUILD_BUILD_CONFIG_H_
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/perf.h b/perfprofd/quipper/kernel-headers/tools/perf/perf.h
new file mode 100644
index 0000000..e58da9a
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/perf.h
@@ -0,0 +1,196 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _PERF_PERF_H
+#define _PERF_PERF_H
+#ifdef __i386__
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define rmb() asm volatile("lock; addl $0,0(%%esp)" : : : "memory")
+#define cpu_relax() asm volatile("rep; nop" : : : "memory");
+#define CPUINFO_PROC "model name"
+#ifndef __NR_perf_event_open
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define __NR_perf_event_open 336
+#endif
+#endif
+#ifdef __x86_64__
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define rmb() asm volatile("lfence" : : : "memory")
+#define cpu_relax() asm volatile("rep; nop" : : : "memory");
+#define CPUINFO_PROC "model name"
+#ifndef __NR_perf_event_open
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define __NR_perf_event_open 298
+#endif
+#endif
+#ifdef __powerpc__
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define rmb() asm volatile("sync" : : : "memory")
+#define cpu_relax() asm volatile("" : : : "memory");
+#define CPUINFO_PROC "cpu"
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#ifdef __s390__
+#define rmb() asm volatile("bcr 15,0" : : : "memory")
+#define cpu_relax() asm volatile("" : : : "memory");
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#ifdef __sh__
+#if defined(__SH4A__) || defined(__SH5__)
+#define rmb() asm volatile("synco" : : : "memory")
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#else
+#define rmb() asm volatile("" : : : "memory")
+#endif
+#define cpu_relax() asm volatile("" : : : "memory")
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define CPUINFO_PROC "cpu type"
+#endif
+#ifdef __hppa__
+#define rmb() asm volatile("" : : : "memory")
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define cpu_relax() asm volatile("" : : : "memory");
+#define CPUINFO_PROC "cpu"
+#endif
+#ifdef __sparc__
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define rmb() asm volatile("" : : : "memory")
+#define cpu_relax() asm volatile("" : : : "memory")
+#define CPUINFO_PROC "cpu"
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#ifdef __alpha__
+#define rmb() asm volatile("mb" : : : "memory")
+#define cpu_relax() asm volatile("" : : : "memory")
+#define CPUINFO_PROC "cpu model"
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#ifdef __ia64__
+#define rmb() asm volatile("mf" : : : "memory")
+#define cpu_relax() asm volatile("hint @pause" : : : "memory")
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define CPUINFO_PROC "model name"
+#endif
+#ifdef __arm__
+#define rmb() ((void(*) (void)) 0xffff0fa0) ()
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define cpu_relax() asm volatile("" : : : "memory")
+#define CPUINFO_PROC "Processor"
+#endif
+#ifdef __aarch64__
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define rmb() asm volatile("dmb ld" : : : "memory")
+#define cpu_relax() asm volatile("yield" : : : "memory")
+#endif
+#ifdef __mips__
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define rmb() asm volatile(".set	mips2\n\t" "sync\n\t" ".set	mips0" : : : "memory")
+#define cpu_relax() asm volatile("" : : : "memory")
+#define CPUINFO_PROC "cpu model"
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#ifdef __arc__
+#define rmb() asm volatile("" : : : "memory")
+#define cpu_relax() rmb()
+#define CPUINFO_PROC "Processor"
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#ifdef __metag__
+#define rmb() asm volatile("" : : : "memory")
+#define cpu_relax() asm volatile("" : : : "memory")
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define CPUINFO_PROC "CPU"
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
+#ifndef NSEC_PER_SEC
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define NSEC_PER_SEC 1000000000ULL
+#endif
+#ifndef NSEC_PER_USEC
+#define NSEC_PER_USEC 1000ULL
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#define __user
+#define asmlinkage
+#define unlikely(x) __builtin_expect(! ! (x), 0)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define min(x,y) ({ typeof(x) _min1 = (x); typeof(y) _min2 = (y); (void) (& _min1 == & _min2); _min1 < _min2 ? _min1 : _min2; })
+#define MAX_COUNTERS 256
+#define MAX_NR_CPUS 256
+struct ip_callchain {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 nr;
+  u64 ips[0];
+};
+struct branch_flags {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 mispred : 1;
+  u64 predicted : 1;
+  u64 reserved : 62;
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct branch_entry {
+  u64 from;
+  u64 to;
+  struct branch_flags flags;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+struct branch_stack {
+  u64 nr;
+  struct branch_entry entries[0];
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+enum perf_call_graph_mode {
+  CALLCHAIN_NONE,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  CALLCHAIN_FP,
+  CALLCHAIN_DWARF
+};
+struct perf_record_opts {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_target target;
+  int call_graph;
+  bool group;
+  bool inherit_stat;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  bool no_delay;
+  bool no_inherit;
+  bool no_samples;
+  bool pipe_output;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  bool raw_samples;
+  bool sample_address;
+  bool sample_weight;
+  bool sample_time;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  bool period;
+  unsigned int freq;
+  unsigned int mmap_pages;
+  unsigned int user_freq;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 branch_stack;
+  u64 default_interval;
+  u64 user_interval;
+  u16 stack_dump_size;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+#endif
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/build-id.h b/perfprofd/quipper/kernel-headers/tools/perf/util/build-id.h
new file mode 100644
index 0000000..b7dbc16
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/build-id.h
@@ -0,0 +1,25 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef PERF_BUILD_ID_H_
+#define PERF_BUILD_ID_H_ 1
+#define BUILD_ID_SIZE 20
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct dso;
+#endif
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/event.h b/perfprofd/quipper/kernel-headers/tools/perf/util/event.h
new file mode 100644
index 0000000..0028344
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/event.h
@@ -0,0 +1,204 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef __PERF_RECORD_H
+#define __PERF_RECORD_H
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct mmap_event {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_event_header header;
+  u32 pid, tid;
+  u64 start;
+  u64 len;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 pgoff;
+  char filename[PATH_MAX];
+};
+struct mmap2_event {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_event_header header;
+  u32 pid, tid;
+  u64 start;
+  u64 len;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 pgoff;
+  u32 maj;
+  u32 min;
+  u64 ino;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 ino_generation;
+  char filename[PATH_MAX];
+};
+struct comm_event {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_event_header header;
+  u32 pid, tid;
+  char comm[16];
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct fork_event {
+  struct perf_event_header header;
+  u32 pid, ppid;
+  u32 tid, ptid;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 time;
+};
+struct lost_event {
+  struct perf_event_header header;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 id;
+  u64 lost;
+};
+struct read_event {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_event_header header;
+  u32 pid, tid;
+  u64 value;
+  u64 time_enabled;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 time_running;
+  u64 id;
+};
+#define PERF_SAMPLE_MASK (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | PERF_SAMPLE_IDENTIFIER)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct sample_event {
+  struct perf_event_header header;
+  u64 array[];
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct regs_dump {
+  u64 abi;
+  u64 * regs;
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct stack_dump {
+  u16 offset;
+  u64 size;
+  char * data;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+struct sample_read_value {
+  u64 value;
+  u64 id;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+struct sample_read {
+  u64 time_enabled;
+  u64 time_running;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  union {
+    struct {
+      u64 nr;
+      struct sample_read_value * values;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+    } group;
+    struct sample_read_value one;
+  };
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct perf_sample {
+  u64 ip;
+  u32 pid, tid;
+  u64 time;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 addr;
+  u64 id;
+  u64 stream_id;
+  u64 period;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 weight;
+  u32 cpu;
+  u32 raw_size;
+  u64 data_src;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  void * raw_data;
+  struct ip_callchain * callchain;
+  struct branch_stack * branch_stack;
+  struct regs_dump user_regs;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct stack_dump user_stack;
+  struct sample_read read;
+};
+#define PERF_MEM_DATA_SRC_NONE (PERF_MEM_S(OP, NA) | PERF_MEM_S(LVL, NA) | PERF_MEM_S(SNOOP, NA) | PERF_MEM_S(LOCK, NA) | PERF_MEM_S(TLB, NA))
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct build_id_event {
+  struct perf_event_header header;
+  pid_t pid;
+  u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  char filename[];
+};
+enum perf_user_event_type {
+  PERF_RECORD_USER_TYPE_START = 64,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  PERF_RECORD_HEADER_ATTR = 64,
+  PERF_RECORD_HEADER_EVENT_TYPE = 65,
+  PERF_RECORD_HEADER_TRACING_DATA = 66,
+  PERF_RECORD_HEADER_BUILD_ID = 67,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  PERF_RECORD_FINISHED_ROUND = 68,
+  PERF_RECORD_HEADER_MAX
+};
+struct attr_event {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_event_header header;
+  struct perf_event_attr attr;
+  u64 id[];
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define MAX_EVENT_NAME 64
+struct perf_trace_event_type {
+  u64 event_id;
+  char name[MAX_EVENT_NAME];
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+struct event_type_event {
+  struct perf_event_header header;
+  struct perf_trace_event_type event_type;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+struct tracing_data_event {
+  struct perf_event_header header;
+  u32 size;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+union perf_event {
+  struct perf_event_header header;
+  struct mmap_event mmap;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct mmap2_event mmap2;
+  struct comm_event comm;
+  struct fork_event fork;
+  struct lost_event lost;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct read_event read;
+  struct sample_event sample;
+  struct attr_event attr;
+  struct event_type_event event_type;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct tracing_data_event tracing_data;
+  struct build_id_event build_id;
+};
+struct perf_tool;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct thread_map;
+typedef int(* perf_event__handler_t) (struct perf_tool * tool, union perf_event * event, struct perf_sample * sample, struct machine * machine);
+struct addr_location;
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/header.h b/perfprofd/quipper/kernel-headers/tools/perf/util/header.h
new file mode 100644
index 0000000..3aab42f
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/header.h
@@ -0,0 +1,121 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+enum {
+  HEADER_RESERVED = 0,
+  HEADER_FIRST_FEATURE = 1,
+  HEADER_TRACING_DATA = 1,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  HEADER_BUILD_ID,
+  HEADER_HOSTNAME,
+  HEADER_OSRELEASE,
+  HEADER_VERSION,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  HEADER_ARCH,
+  HEADER_NRCPUS,
+  HEADER_CPUDESC,
+  HEADER_CPUID,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  HEADER_TOTAL_MEM,
+  HEADER_CMDLINE,
+  HEADER_EVENT_DESC,
+  HEADER_CPU_TOPOLOGY,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  HEADER_NUMA_TOPOLOGY,
+  HEADER_BRANCH_STACK,
+  HEADER_PMU_MAPPINGS,
+  HEADER_GROUP_DESC,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  HEADER_LAST_FEATURE,
+  HEADER_FEAT_BITS = 256,
+};
+enum perf_header_version {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  PERF_HEADER_VERSION_1,
+  PERF_HEADER_VERSION_2,
+};
+struct perf_file_section {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 offset;
+  u64 size;
+};
+struct perf_file_header {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 magic;
+  u64 size;
+  u64 attr_size;
+  struct perf_file_section attrs;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  struct perf_file_section data;
+  struct perf_file_section event_types;
+  DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct perf_pipe_file_header {
+  u64 magic;
+  u64 size;
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct perf_header;
+struct perf_session_env {
+  char * hostname;
+  char * os_release;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  char * version;
+  char * arch;
+  int nr_cpus_online;
+  int nr_cpus_avail;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  char * cpu_desc;
+  char * cpuid;
+  unsigned long long total_mem;
+  int nr_cmdline;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  char * cmdline;
+  int nr_sibling_cores;
+  char * sibling_cores;
+  int nr_sibling_threads;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  char * sibling_threads;
+  int nr_numa_nodes;
+  char * numa_nodes;
+  int nr_pmu_mappings;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  char * pmu_mappings;
+  int nr_groups;
+};
+struct perf_header {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  enum perf_header_version version;
+  bool needs_swap;
+  u64 data_offset;
+  u64 data_size;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 feat_offset;
+  DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+  struct perf_session_env env;
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct perf_evlist;
+struct perf_session;
+#endif
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/bitops.h b/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/bitops.h
new file mode 100644
index 0000000..c6c4768
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/bitops.h
@@ -0,0 +1,41 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _PERF_LINUX_BITOPS_H_
+#define _PERF_LINUX_BITOPS_H_
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define BITS_PER_LONG __WORDSIZE
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
+#define for_each_set_bit(bit,addr,size) for((bit) = find_first_bit((addr), (size)); (bit) < (size); (bit) = find_next_bit((addr), (size), (bit) + 1))
+#define for_each_set_bit_from(bit,addr,size) for((bit) = find_next_bit((addr), (size), (bit)); (bit) < (size); (bit) = find_next_bit((addr), (size), (bit) + 1))
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#if BITS_PER_LONG == 64
+#endif
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/kernel/kernel.h b/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/kernel/kernel.h
new file mode 100644
index 0000000..d589c85
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/kernel/kernel.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef PERF_LINUX_KERNEL_H_
+#define PERF_LINUX_KERNEL_H_
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define PERF_ALIGN(x,a) __PERF_ALIGN_MASK(x, (typeof(x)) (a) - 1)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define __PERF_ALIGN_MASK(x,mask) (((x) + (mask)) & ~(mask))
+#ifndef offsetof
+#define offsetof(TYPE,MEMBER) ((size_t) & ((TYPE *) 0)->MEMBER)
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#ifndef container_of
+#define container_of(ptr,type,member) ({ const typeof(((type *) 0)->member) * __mptr = (ptr); (type *) ((char *) __mptr - offsetof(type, member)); })
+#endif
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int : - ! ! (e); }))
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#ifndef max
+#define max(x,y) ({ typeof(x) _max1 = (x); typeof(y) _max2 = (y); (void) (& _max1 == & _max2); _max1 > _max2 ? _max1 : _max2; })
+#endif
+#ifndef min
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define min(x,y) ({ typeof(x) _min1 = (x); typeof(y) _min2 = (y); (void) (& _min1 == & _min2); _min1 < _min2 ? _min1 : _min2; })
+#endif
+#ifndef roundup
+#define roundup(x,y) (\
+{ const typeof(y) __y = y; (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#ifndef BUG_ON
+#ifdef NDEBUG
+#define BUG_ON(cond) do { if(cond) { } } while(0)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#else
+#define BUG_ON(cond) assert(! (cond))
+#endif
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#define pr_err(fmt,...) eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt,...) eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt,...) eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define pr_debug(fmt,...) eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n,fmt,...) eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt,...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt,...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define pr_debug4(fmt,...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+#define __round_mask(x,y) ((__typeof__(x)) ((y) - 1))
+#define round_up(x,y) ((((x) - 1) | __round_mask(x, y)) + 1)
+#define round_down(x,y) ((x) & ~__round_mask(x, y))
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/types.h b/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/types.h
new file mode 100644
index 0000000..9f13906
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/include/linux/types.h
@@ -0,0 +1,43 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _PERF_LINUX_TYPES_H_
+#define _PERF_LINUX_TYPES_H_
+#ifndef __bitwise
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define __bitwise
+#endif
+#ifndef __le32
+typedef __u32 __bitwise __le32;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
+#define DECLARE_BITMAP(name,bits) unsigned long (name)[BITS_TO_LONGS(bits)]
+struct list_head {
+  struct list_head * next, * prev;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+struct hlist_head {
+  struct hlist_node * first;
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct hlist_node {
+  struct hlist_node * next, * * pprev;
+};
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/target.h b/perfprofd/quipper/kernel-headers/tools/perf/util/target.h
new file mode 100644
index 0000000..e6c3d94
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/target.h
@@ -0,0 +1,52 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _PERF_TARGET_H
+#define _PERF_TARGET_H
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+struct perf_target {
+  const char * pid;
+  const char * tid;
+  const char * cpu_list;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  const char * uid_str;
+  uid_t uid;
+  bool system_wide;
+  bool uses_mmap;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+enum perf_target_errno {
+  PERF_ERRNO_TARGET__SUCCESS = 0,
+  __PERF_ERRNO_TARGET__START = - 10000,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START,
+  PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
+  PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
+  PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
+  PERF_ERRNO_TARGET__INVALID_UID,
+  PERF_ERRNO_TARGET__USER_NOT_FOUND,
+  __PERF_ERRNO_TARGET__END,
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+};
+enum perf_target_errno perf_target__validate(struct perf_target * target);
+enum perf_target_errno perf_target__parse_uid(struct perf_target * target);
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+
diff --git a/perfprofd/quipper/kernel-headers/tools/perf/util/types.h b/perfprofd/quipper/kernel-headers/tools/perf/util/types.h
new file mode 100644
index 0000000..cf36814
--- /dev/null
+++ b/perfprofd/quipper/kernel-headers/tools/perf/util/types.h
@@ -0,0 +1,38 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef __PERF_TYPES_H
+#define __PERF_TYPES_H
+typedef uint64_t u64;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+typedef int64_t s64;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned short u16;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+typedef signed short s16;
+typedef unsigned char u8;
+typedef signed char s8;
+union u64_swap {
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+  u64 val64;
+  u32 val32[2];
+};
+#endif
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/perf.h b/perfprofd/quipper/original-kernel-headers/tools/perf/perf.h
new file mode 100644
index 0000000..cf20187
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/perf.h
@@ -0,0 +1,236 @@
+#ifndef _PERF_PERF_H
+#define _PERF_PERF_H
+
+#include <asm/unistd.h>
+
+#if defined(__i386__)
+#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
+#define CPUINFO_PROC	"model name"
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 336
+#endif
+#endif
+
+#if defined(__x86_64__)
+#define rmb()		asm volatile("lfence" ::: "memory")
+#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
+#define CPUINFO_PROC	"model name"
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 298
+#endif
+#endif
+
+#ifdef __powerpc__
+#include "../../arch/powerpc/include/uapi/asm/unistd.h"
+#define rmb()		asm volatile ("sync" ::: "memory")
+#define cpu_relax()	asm volatile ("" ::: "memory");
+#define CPUINFO_PROC	"cpu"
+#endif
+
+#ifdef __s390__
+#define rmb()		asm volatile("bcr 15,0" ::: "memory")
+#define cpu_relax()	asm volatile("" ::: "memory");
+#endif
+
+#ifdef __sh__
+#if defined(__SH4A__) || defined(__SH5__)
+# define rmb()		asm volatile("synco" ::: "memory")
+#else
+# define rmb()		asm volatile("" ::: "memory")
+#endif
+#define cpu_relax()	asm volatile("" ::: "memory")
+#define CPUINFO_PROC	"cpu type"
+#endif
+
+#ifdef __hppa__
+#define rmb()		asm volatile("" ::: "memory")
+#define cpu_relax()	asm volatile("" ::: "memory");
+#define CPUINFO_PROC	"cpu"
+#endif
+
+#ifdef __sparc__
+#define rmb()		asm volatile("":::"memory")
+#define cpu_relax()	asm volatile("":::"memory")
+#define CPUINFO_PROC	"cpu"
+#endif
+
+#ifdef __alpha__
+#define rmb()		asm volatile("mb" ::: "memory")
+#define cpu_relax()	asm volatile("" ::: "memory")
+#define CPUINFO_PROC	"cpu model"
+#endif
+
+#ifdef __ia64__
+#define rmb()		asm volatile ("mf" ::: "memory")
+#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
+#define CPUINFO_PROC	"model name"
+#endif
+
+#ifdef __arm__
+/*
+ * Use the __kuser_memory_barrier helper in the CPU helper page. See
+ * arch/arm/kernel/entry-armv.S in the kernel source for details.
+ */
+#define rmb()		((void(*)(void))0xffff0fa0)()
+#define cpu_relax()	asm volatile("":::"memory")
+#define CPUINFO_PROC	"Processor"
+#endif
+
+#ifdef __aarch64__
+#define rmb()		asm volatile("dmb ld" ::: "memory")
+#define cpu_relax()	asm volatile("yield" ::: "memory")
+#endif
+
+#ifdef __mips__
+#define rmb()		asm volatile(					\
+				".set	mips2\n\t"			\
+				"sync\n\t"				\
+				".set	mips0"				\
+				: /* no output */			\
+				: /* no input */			\
+				: "memory")
+#define cpu_relax()	asm volatile("" ::: "memory")
+#define CPUINFO_PROC	"cpu model"
+#endif
+
+#ifdef __arc__
+#define rmb()		asm volatile("" ::: "memory")
+#define cpu_relax()	rmb()
+#define CPUINFO_PROC	"Processor"
+#endif
+
+#ifdef __metag__
+#define rmb()		asm volatile("" ::: "memory")
+#define cpu_relax()	asm volatile("" ::: "memory")
+#define CPUINFO_PROC	"CPU"
+#endif
+
+#include <time.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <linux/perf_event.h>
+#include "util/types.h"
+#include <stdbool.h>
+
+/*
+ * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
+ * counters in the current task.
+ */
+#define PR_TASK_PERF_EVENTS_DISABLE   31
+#define PR_TASK_PERF_EVENTS_ENABLE    32
+
+#ifndef NSEC_PER_SEC
+# define NSEC_PER_SEC			1000000000ULL
+#endif
+#ifndef NSEC_PER_USEC
+# define NSEC_PER_USEC			1000ULL
+#endif
+
+static inline unsigned long long rdclock(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+}
+
+/*
+ * Pick up some kernel type conventions:
+ */
+#define __user
+#define asmlinkage
+
+#define unlikely(x)	__builtin_expect(!!(x), 0)
+#define min(x, y) ({				\
+	typeof(x) _min1 = (x);			\
+	typeof(y) _min2 = (y);			\
+	(void) (&_min1 == &_min2);		\
+	_min1 < _min2 ? _min1 : _min2; })
+
+extern bool test_attr__enabled;
+void test_attr__init(void);
+void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
+		     int fd, int group_fd, unsigned long flags);
+
+static inline int
+sys_perf_event_open(struct perf_event_attr *attr,
+		      pid_t pid, int cpu, int group_fd,
+		      unsigned long flags)
+{
+	int fd;
+
+	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
+		     group_fd, flags);
+
+	if (unlikely(test_attr__enabled))
+		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
+
+	return fd;
+}
+
+#define MAX_COUNTERS			256
+#define MAX_NR_CPUS			256
+
+struct ip_callchain {
+	u64 nr;
+	u64 ips[0];
+};
+
+struct branch_flags {
+	u64 mispred:1;
+	u64 predicted:1;
+	u64 reserved:62;
+};
+
+struct branch_entry {
+	u64				from;
+	u64				to;
+	struct branch_flags flags;
+};
+
+struct branch_stack {
+	u64				nr;
+	struct branch_entry	entries[0];
+};
+
+extern const char *input_name;
+extern bool perf_host, perf_guest;
+extern const char perf_version_string[];
+
+void pthread__unblock_sigwinch(void);
+
+#include "util/target.h"
+
+enum perf_call_graph_mode {
+	CALLCHAIN_NONE,
+	CALLCHAIN_FP,
+	CALLCHAIN_DWARF
+};
+
+struct perf_record_opts {
+	struct perf_target target;
+	int	     call_graph;
+	bool	     group;
+	bool	     inherit_stat;
+	bool	     no_delay;
+	bool	     no_inherit;
+	bool	     no_samples;
+	bool	     pipe_output;
+	bool	     raw_samples;
+	bool	     sample_address;
+	bool	     sample_weight;
+	bool	     sample_time;
+	bool	     period;
+	unsigned int freq;
+	unsigned int mmap_pages;
+	unsigned int user_freq;
+	u64          branch_stack;
+	u64	     default_interval;
+	u64	     user_interval;
+	u16	     stack_dump_size;
+};
+
+#endif
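
The inline sys_perf_event_open() wrapper above is a thin shim over the raw syscall. Below is a minimal standalone sketch (not part of the imported sources; the counter choice and error handling are illustrative only) of opening and reading a single hardware counter the same way.

// Sketch: count instructions retired by this process between ENABLE/DISABLE.
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}
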
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/build-id.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/build-id.h
new file mode 100644
index 0000000..a811f5c
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/build-id.h
@@ -0,0 +1,19 @@
+#ifndef PERF_BUILD_ID_H_
+#define PERF_BUILD_ID_H_ 1
+
+#define BUILD_ID_SIZE 20
+
+#include "tool.h"
+#include "types.h"
+
+extern struct perf_tool build_id__mark_dso_hit_ops;
+struct dso;
+
+int build_id__sprintf(const u8 *build_id, int len, char *bf);
+char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
+
+int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
+			   struct perf_sample *sample, struct perf_evsel *evsel,
+			   struct machine *machine);
+
+#endif
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/event.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/event.h
new file mode 100644
index 0000000..c67ecc4
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/event.h
@@ -0,0 +1,263 @@
+#ifndef __PERF_RECORD_H
+#define __PERF_RECORD_H
+
+#include <limits.h>
+#include <stdio.h>
+
+#include "../perf.h"
+#include "map.h"
+#include "build-id.h"
+
+struct mmap_event {
+	struct perf_event_header header;
+	u32 pid, tid;
+	u64 start;
+	u64 len;
+	u64 pgoff;
+	char filename[PATH_MAX];
+};
+
+struct mmap2_event {
+	struct perf_event_header header;
+	u32 pid, tid;
+	u64 start;
+	u64 len;
+	u64 pgoff;
+	u32 maj;
+	u32 min;
+	u64 ino;
+	u64 ino_generation;
+	char filename[PATH_MAX];
+};
+
+struct comm_event {
+	struct perf_event_header header;
+	u32 pid, tid;
+	char comm[16];
+};
+
+struct fork_event {
+	struct perf_event_header header;
+	u32 pid, ppid;
+	u32 tid, ptid;
+	u64 time;
+};
+
+struct lost_event {
+	struct perf_event_header header;
+	u64 id;
+	u64 lost;
+};
+
+/*
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ */
+struct read_event {
+	struct perf_event_header header;
+	u32 pid, tid;
+	u64 value;
+	u64 time_enabled;
+	u64 time_running;
+	u64 id;
+};
+
+
+#define PERF_SAMPLE_MASK				\
+	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
+	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\
+	PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
+	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |		\
+	 PERF_SAMPLE_IDENTIFIER)
+
+struct sample_event {
+	struct perf_event_header        header;
+	u64 array[];
+};
+
+struct regs_dump {
+	u64 abi;
+	u64 *regs;
+};
+
+struct stack_dump {
+	u16 offset;
+	u64 size;
+	char *data;
+};
+
+struct sample_read_value {
+	u64 value;
+	u64 id;
+};
+
+struct sample_read {
+	u64 time_enabled;
+	u64 time_running;
+	union {
+		struct {
+			u64 nr;
+			struct sample_read_value *values;
+		} group;
+		struct sample_read_value one;
+	};
+};
+
+struct perf_sample {
+	u64 ip;
+	u32 pid, tid;
+	u64 time;
+	u64 addr;
+	u64 id;
+	u64 stream_id;
+	u64 period;
+	u64 weight;
+	u32 cpu;
+	u32 raw_size;
+	u64 data_src;
+	void *raw_data;
+	struct ip_callchain *callchain;
+	struct branch_stack *branch_stack;
+	struct regs_dump  user_regs;
+	struct stack_dump user_stack;
+	struct sample_read read;
+};
+
+#define PERF_MEM_DATA_SRC_NONE \
+	(PERF_MEM_S(OP, NA) |\
+	 PERF_MEM_S(LVL, NA) |\
+	 PERF_MEM_S(SNOOP, NA) |\
+	 PERF_MEM_S(LOCK, NA) |\
+	 PERF_MEM_S(TLB, NA))
+
+struct build_id_event {
+	struct perf_event_header header;
+	pid_t			 pid;
+	u8			 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+	char			 filename[];
+};
+
+enum perf_user_event_type { /* above any possible kernel type */
+	PERF_RECORD_USER_TYPE_START		= 64,
+	PERF_RECORD_HEADER_ATTR			= 64,
+	PERF_RECORD_HEADER_EVENT_TYPE		= 65, /* deprecated */
+	PERF_RECORD_HEADER_TRACING_DATA		= 66,
+	PERF_RECORD_HEADER_BUILD_ID		= 67,
+	PERF_RECORD_FINISHED_ROUND		= 68,
+	PERF_RECORD_HEADER_MAX
+};
+
+struct attr_event {
+	struct perf_event_header header;
+	struct perf_event_attr attr;
+	u64 id[];
+};
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+	u64	event_id;
+	char	name[MAX_EVENT_NAME];
+};
+
+struct event_type_event {
+	struct perf_event_header header;
+	struct perf_trace_event_type event_type;
+};
+
+struct tracing_data_event {
+	struct perf_event_header header;
+	u32 size;
+};
+
+union perf_event {
+	struct perf_event_header	header;
+	struct mmap_event		mmap;
+	struct mmap2_event		mmap2;
+	struct comm_event		comm;
+	struct fork_event		fork;
+	struct lost_event		lost;
+	struct read_event		read;
+	struct sample_event		sample;
+	struct attr_event		attr;
+	struct event_type_event		event_type;
+	struct tracing_data_event	tracing_data;
+	struct build_id_event		build_id;
+};
+
+void perf_event__print_totals(void);
+
+struct perf_tool;
+struct thread_map;
+
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+				     union perf_event *event,
+				     struct perf_sample *sample,
+				     struct machine *machine);
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+				      struct thread_map *threads,
+				      perf_event__handler_t process,
+				      struct machine *machine);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+				   perf_event__handler_t process,
+				   struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+				       perf_event__handler_t process,
+				       struct machine *machine,
+				       const char *symbol_name);
+
+int perf_event__synthesize_modules(struct perf_tool *tool,
+				   perf_event__handler_t process,
+				   struct machine *machine);
+
+int perf_event__process_comm(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_mmap2(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_fork(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process_exit(struct perf_tool *tool,
+			     union perf_event *event,
+			     struct perf_sample *sample,
+			     struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+			union perf_event *event,
+			struct perf_sample *sample,
+			struct machine *machine);
+
+struct addr_location;
+int perf_event__preprocess_sample(const union perf_event *self,
+				  struct machine *machine,
+				  struct addr_location *al,
+				  struct perf_sample *sample);
+
+const char *perf_event__name(unsigned int id);
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+				     u64 sample_regs_user, u64 read_format);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+				  u64 sample_regs_user, u64 read_format,
+				  const struct perf_sample *sample,
+				  bool swapped);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+
+#endif /* __PERF_RECORD_H */
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/header.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/header.h
new file mode 100644
index 0000000..307c9ae
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/header.h
@@ -0,0 +1,159 @@
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
+
+#include <linux/perf_event.h>
+#include <sys/types.h>
+#include <stdbool.h>
+#include "types.h"
+#include "event.h"
+
+#include <linux/bitmap.h>
+
+enum {
+	HEADER_RESERVED		= 0,	/* always cleared */
+	HEADER_FIRST_FEATURE	= 1,
+	HEADER_TRACING_DATA	= 1,
+	HEADER_BUILD_ID,
+
+	HEADER_HOSTNAME,
+	HEADER_OSRELEASE,
+	HEADER_VERSION,
+	HEADER_ARCH,
+	HEADER_NRCPUS,
+	HEADER_CPUDESC,
+	HEADER_CPUID,
+	HEADER_TOTAL_MEM,
+	HEADER_CMDLINE,
+	HEADER_EVENT_DESC,
+	HEADER_CPU_TOPOLOGY,
+	HEADER_NUMA_TOPOLOGY,
+	HEADER_BRANCH_STACK,
+	HEADER_PMU_MAPPINGS,
+	HEADER_GROUP_DESC,
+	HEADER_LAST_FEATURE,
+	HEADER_FEAT_BITS	= 256,
+};
+
+enum perf_header_version {
+	PERF_HEADER_VERSION_1,
+	PERF_HEADER_VERSION_2,
+};
+
+struct perf_file_section {
+	u64 offset;
+	u64 size;
+};
+
+struct perf_file_header {
+	u64				magic;
+	u64				size;
+	u64				attr_size;
+	struct perf_file_section	attrs;
+	struct perf_file_section	data;
+	/* event_types is ignored */
+	struct perf_file_section	event_types;
+	DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+
+struct perf_pipe_file_header {
+	u64				magic;
+	u64				size;
+};
+
+struct perf_header;
+
+int perf_file_header__read(struct perf_file_header *header,
+			   struct perf_header *ph, int fd);
+
+struct perf_session_env {
+	char			*hostname;
+	char			*os_release;
+	char			*version;
+	char			*arch;
+	int			nr_cpus_online;
+	int			nr_cpus_avail;
+	char			*cpu_desc;
+	char			*cpuid;
+	unsigned long long	total_mem;
+
+	int			nr_cmdline;
+	char			*cmdline;
+	int			nr_sibling_cores;
+	char			*sibling_cores;
+	int			nr_sibling_threads;
+	char			*sibling_threads;
+	int			nr_numa_nodes;
+	char			*numa_nodes;
+	int			nr_pmu_mappings;
+	char			*pmu_mappings;
+	int			nr_groups;
+};
+
+struct perf_header {
+	enum perf_header_version	version;
+	bool				needs_swap;
+	u64				data_offset;
+	u64				data_size;
+	u64				feat_offset;
+	DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+	struct perf_session_env 	env;
+};
+
+struct perf_evlist;
+struct perf_session;
+
+int perf_session__read_header(struct perf_session *session);
+int perf_session__write_header(struct perf_session *session,
+			       struct perf_evlist *evlist,
+			       int fd, bool at_exit);
+int perf_header__write_pipe(int fd);
+
+void perf_header__set_feat(struct perf_header *header, int feat);
+void perf_header__clear_feat(struct perf_header *header, int feat);
+bool perf_header__has_feat(const struct perf_header *header, int feat);
+
+int perf_header__set_cmdline(int argc, const char **argv);
+
+int perf_header__process_sections(struct perf_header *header, int fd,
+				  void *data,
+				  int (*process)(struct perf_file_section *section,
+				  struct perf_header *ph,
+				  int feat, int fd, void *data));
+
+int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+			  const char *name, bool is_kallsyms, bool is_vdso);
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+
+int perf_event__synthesize_attr(struct perf_tool *tool,
+				struct perf_event_attr *attr, u32 ids, u64 *id,
+				perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+				 struct perf_session *session,
+				 perf_event__handler_t process);
+int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
+			     struct perf_evlist **pevlist);
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+					int fd, struct perf_evlist *evlist,
+					perf_event__handler_t process);
+int perf_event__process_tracing_data(struct perf_tool *tool,
+				     union perf_event *event,
+				     struct perf_session *session);
+
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+				    struct dso *pos, u16 misc,
+				    perf_event__handler_t process,
+				    struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+				 union perf_event *event,
+				 struct perf_session *session);
+bool is_perf_magic(u64 magic);
+
+/*
+ * arch specific callback
+ */
+int get_cpuid(char *buffer, size_t sz);
+
+#endif /* __PERF_HEADER_H */
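
The adds_features bitmap in perf_file_header stores one bit per HEADER_* feature, which perf_header__set_feat()/perf_header__has_feat() manipulate. A standalone sketch of the same bit indexing follows (SetFeat/HasFeat are illustrative names, not the perf implementation).

// Sketch: feature N of the HEADER_* enum is simply bit N of the bitmap.
#include <cstdio>

namespace {

const int kFeatBits = 256;                          // mirrors HEADER_FEAT_BITS
const int kBitsPerLong = sizeof(unsigned long) * 8;

void SetFeat(unsigned long* bitmap, int feat) {
  bitmap[feat / kBitsPerLong] |= 1UL << (feat % kBitsPerLong);
}

bool HasFeat(const unsigned long* bitmap, int feat) {
  return (bitmap[feat / kBitsPerLong] >> (feat % kBitsPerLong)) & 1UL;
}

}  // namespace

int main() {
  unsigned long adds_features[kFeatBits / kBitsPerLong] = {0};
  const int kHeaderBuildId = 2;                     // HEADER_BUILD_ID above
  SetFeat(adds_features, kHeaderBuildId);
  std::printf("build-id feature present: %d\n", HasFeat(adds_features, kHeaderBuildId));
  return 0;
}
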
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/bitops.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/bitops.h
new file mode 100644
index 0000000..45cf10a
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/bitops.h
@@ -0,0 +1,158 @@
+#ifndef _PERF_LINUX_BITOPS_H_
+#define _PERF_LINUX_BITOPS_H_
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/hweight.h>
+
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
+#define BITS_PER_LONG __WORDSIZE
+#define BITS_PER_BYTE           8
+#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE)
+
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+	for ((bit) = find_next_bit((addr), (size), (bit));	\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
+}
+
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+	return ((1UL << (nr % BITS_PER_LONG)) &
+		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+	int num = 0;
+
+#if BITS_PER_LONG == 64
+	if ((word & 0xffffffff) == 0) {
+		num += 32;
+		word >>= 32;
+	}
+#endif
+	if ((word & 0xffff) == 0) {
+		num += 16;
+		word >>= 16;
+	}
+	if ((word & 0xff) == 0) {
+		num += 8;
+		word >>= 8;
+	}
+	if ((word & 0xf) == 0) {
+		num += 4;
+		word >>= 4;
+	}
+	if ((word & 0x3) == 0) {
+		num += 2;
+		word >>= 2;
+	}
+	if ((word & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+
+	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found:
+	return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+#endif
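
find_first_bit()/find_next_bit() above scan a word at a time and use __ffs() to locate the lowest set bit, so for_each_set_bit() visits set-bit indices in ascending order. A standalone sketch of that contract (a naive bit-by-bit loop with the same iteration order, without the word-skipping optimization):

// Sketch: enumerate set bits of a two-word bitmap, lowest index first.
#include <cstdio>

int main() {
  const unsigned long bits_per_long = sizeof(unsigned long) * 8;
  unsigned long bitmap[2] = {0};
  bitmap[0] = (1UL << 3) | (1UL << 17);   // bits 3 and 17
  bitmap[1] = 1UL << 1;                   // bit BITS_PER_LONG + 1

  const unsigned long size = 2 * bits_per_long;
  for (unsigned long bit = 0; bit < size; ++bit) {
    if (bitmap[bit / bits_per_long] & (1UL << (bit % bits_per_long)))
      std::printf("set bit: %lu\n", bit);  // prints 3, 17, BITS_PER_LONG + 1
  }
  return 0;
}
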
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/kernel/kernel.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/kernel/kernel.h
new file mode 100644
index 0000000..d8c927c
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/kernel/kernel.h
@@ -0,0 +1,134 @@
+#ifndef PERF_LINUX_KERNEL_H_
+#define PERF_LINUX_KERNEL_H_
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define PERF_ALIGN(x, a)	__PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __PERF_ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:	the pointer to the member.
+ * @type:	the type of the container struct this is embedded in.
+ * @member:	the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({			\
+	const typeof(((type *)0)->member) * __mptr = (ptr);	\
+	(type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#ifndef max
+#define max(x, y) ({				\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	(void) (&_max1 == &_max2);		\
+	_max1 > _max2 ? _max1 : _max2; })
+#endif
+
+#ifndef min
+#define min(x, y) ({				\
+	typeof(x) _min1 = (x);			\
+	typeof(y) _min2 = (y);			\
+	(void) (&_min1 == &_min2);		\
+	_min1 < _min2 ? _min1 : _min2; })
+#endif
+
+#ifndef roundup
+#define roundup(x, y) (                                \
+{                                                      \
+	const typeof(y) __y = y;		       \
+	(((x) + (__y - 1)) / __y) * __y;	       \
+}                                                      \
+)
+#endif
+
+#ifndef BUG_ON
+#ifdef NDEBUG
+#define BUG_ON(cond) do { if (cond) {} } while (0)
+#else
+#define BUG_ON(cond) assert(!(cond))
+#endif
+#endif
+
+/*
+ * Both need more care to handle endianness
+ * (Don't use bitmap_copy_le() for now)
+ */
+#define cpu_to_le64(x)	(x)
+#define cpu_to_le32(x)	(x)
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int i;
+	ssize_t ssize = size;
+
+	i = vsnprintf(buf, size, fmt, args);
+
+	return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
+{
+	va_list args;
+	ssize_t ssize = size;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline unsigned long
+simple_strtoul(const char *nptr, char **endptr, int base)
+{
+	return strtoul(nptr, endptr, base);
+}
+
+int eprintf(int level,
+	    const char *fmt, ...) __attribute__((format(printf, 2, 3)));
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+	eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+	eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+	eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+	eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+	eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#endif
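
round_up()/round_down() rely on the power-of-two mask trick described in the comment above. A short worked example (values chosen for illustration, macros copied verbatim from this header):

// __round_mask(x, 8) == 7, so:
//   round_up(13, 8)     == ((13 - 1) | 7) + 1 == (12 | 7) + 1 == 16
//   round_down(13, 8)   == 13 & ~7            == 8
//   DIV_ROUND_UP(13, 8) == (13 + 7) / 8       == 2
#include <cstdio>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main() {
  std::printf("%d %d %d\n",
              round_up(13, 8), round_down(13, 8), DIV_ROUND_UP(13, 8));  // 16 8 2
  return 0;
}
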
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/types.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/types.h
new file mode 100644
index 0000000..eb46478
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/include/linux/types.h
@@ -0,0 +1,29 @@
+#ifndef _PERF_LINUX_TYPES_H_
+#define _PERF_LINUX_TYPES_H_
+
+#include <asm/types.h>
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+
+#ifndef __le32
+typedef __u32 __bitwise __le32;
+#endif
+
+#define DECLARE_BITMAP(name,bits) \
+	unsigned long name[BITS_TO_LONGS(bits)]
+
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+#endif
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/target.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/target.h
new file mode 100644
index 0000000..a4be857
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/target.h
@@ -0,0 +1,65 @@
+#ifndef _PERF_TARGET_H
+#define _PERF_TARGET_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+struct perf_target {
+	const char   *pid;
+	const char   *tid;
+	const char   *cpu_list;
+	const char   *uid_str;
+	uid_t	     uid;
+	bool	     system_wide;
+	bool	     uses_mmap;
+};
+
+enum perf_target_errno {
+	PERF_ERRNO_TARGET__SUCCESS		= 0,
+
+	/*
+	 * Choose an arbitrarily large negative number so as not to clash with
+	 * standard errno, since SUS requires errno to use distinct positive values.
+	 * See 'Issue 6' in the link below.
+	 *
+	 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
+	 */
+	__PERF_ERRNO_TARGET__START		= -10000,
+
+
+	/* for perf_target__validate() */
+	PERF_ERRNO_TARGET__PID_OVERRIDE_CPU	= __PERF_ERRNO_TARGET__START,
+	PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
+	PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
+	PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
+	PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
+
+	/* for perf_target__parse_uid() */
+	PERF_ERRNO_TARGET__INVALID_UID,
+	PERF_ERRNO_TARGET__USER_NOT_FOUND,
+
+	__PERF_ERRNO_TARGET__END,
+};
+
+enum perf_target_errno perf_target__validate(struct perf_target *target);
+enum perf_target_errno perf_target__parse_uid(struct perf_target *target);
+
+int perf_target__strerror(struct perf_target *target, int errnum, char *buf,
+			  size_t buflen);
+
+static inline bool perf_target__has_task(struct perf_target *target)
+{
+	return target->tid || target->pid || target->uid_str;
+}
+
+static inline bool perf_target__has_cpu(struct perf_target *target)
+{
+	return target->system_wide || target->cpu_list;
+}
+
+static inline bool perf_target__none(struct perf_target *target)
+{
+	return !perf_target__has_task(target) && !perf_target__has_cpu(target);
+}
+
+#endif /* _PERF_TARGET_H */
diff --git a/perfprofd/quipper/original-kernel-headers/tools/perf/util/types.h b/perfprofd/quipper/original-kernel-headers/tools/perf/util/types.h
new file mode 100644
index 0000000..c51fa6b
--- /dev/null
+++ b/perfprofd/quipper/original-kernel-headers/tools/perf/util/types.h
@@ -0,0 +1,24 @@
+#ifndef __PERF_TYPES_H
+#define __PERF_TYPES_H
+
+#include <stdint.h>
+
+/*
+ * We define u64 as uint64_t for every architecture
+ * so that we can print it with "%"PRIx64 without getting warnings.
+ */
+typedef uint64_t	   u64;
+typedef int64_t		   s64;
+typedef unsigned int	   u32;
+typedef signed int	   s32;
+typedef unsigned short	   u16;
+typedef signed short	   s16;
+typedef unsigned char	   u8;
+typedef signed char	   s8;
+
+union u64_swap {
+	u64 val64;
+	u32 val32[2];
+};
+
+#endif /* __PERF_TYPES_H */
diff --git a/perfprofd/quipper/perf_internals.h b/perfprofd/quipper/perf_internals.h
new file mode 100644
index 0000000..a779d3c
--- /dev/null
+++ b/perfprofd/quipper/perf_internals.h
@@ -0,0 +1,77 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef PERF_INTERNALS_H
+#define PERF_INTERNALS_H
+
+#include <linux/perf_event.h>
+#include "kernel-headers/tools/perf/util/types.h"
+#include "kernel-headers/tools/perf/util/include/linux/bitops.h"
+#include "kernel-headers/tools/perf/util/include/linux/types.h"
+#include "kernel-headers/tools/perf/util/build-id.h"
+#include "kernel-headers/tools/perf/util/include/linux/kernel/kernel.h"
+#include "kernel-headers/tools/perf/util/header.h"
+#include "kernel-headers/tools/perf/util/event.h"
+#include "kernel-headers/tools/perf/util/target.h"
+#include "kernel-headers/tools/perf/perf.h"
+
+// The first 64 bits of the perf header, used as a perf data file ID tag.
+const uint64_t kPerfMagic = 0x32454c4946524550LL;  // "PERFILE2" little-endian
+
+#undef max
+#undef min
+
+//
+// Wrapper class to manage creation/deletion of storage associated
+// with perf_sample structs.
+//
+class PerfSampleCustodian {
+ public:
+  explicit PerfSampleCustodian(struct perf_sample& sample)
+      : sample_(sample) {
+    sample.raw_data = NULL;
+    sample.callchain = NULL;
+    sample.branch_stack = NULL;
+  }
+  ~PerfSampleCustodian() {
+    if (sample_.callchain)
+      delete [] sample_.callchain;
+    if (sample_.branch_stack)
+      delete [] sample_.branch_stack;
+    if (sample_.raw_data)
+      delete [] reinterpret_cast<char*>(sample_.raw_data);
+  }
+ private:
+  struct perf_sample& sample_;
+};
+
+typedef perf_event event_t;
+
+//
+// Custom / user-specific records emitted by simpleperf.
+// These need to be kept in sync with the simpleperf sources.
+//
+enum simpleperf_record_type {
+  SIMPLE_PERF_RECORD_TYPE_START = 32768,
+  SIMPLE_PERF_RECORD_KERNEL_SYMBOL,
+  SIMPLE_PERF_RECORD_DSO,
+  SIMPLE_PERF_RECORD_SYMBOL,
+  SIMPLE_PERF_RECORD_SPLIT,
+  SIMPLE_PERF_RECORD_SPLIT_END,
+};
+
+#endif
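
kPerfMagic is the ASCII string "PERFILE2" read as a little-endian u64, so comparing the first eight bytes of a file against it identifies version-2 perf.data output. A sketch of that check (hypothetical file path, little-endian host assumed):

// Sketch: read the first 8 bytes of a perf.data file and compare to the magic.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kPerfMagic = 0x32454c4946524550LL;     // "PERFILE2"
  std::FILE* fp = std::fopen("/tmp/perf.data", "rb");   // hypothetical path
  if (!fp)
    return 1;
  uint64_t magic = 0;
  bool ok = std::fread(&magic, sizeof(magic), 1, fp) == 1 && magic == kPerfMagic;
  std::fclose(fp);
  std::printf("looks like a perf.data file: %d\n", ok);
  return 0;
}
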
diff --git a/perfprofd/quipper/perf_parser.cc b/perfprofd/quipper/perf_parser.cc
new file mode 100644
index 0000000..c9ec189
--- /dev/null
+++ b/perfprofd/quipper/perf_parser.cc
@@ -0,0 +1,584 @@
+// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "perf_parser.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <set>
+
+#include "base/logging.h"
+
+#include "address_mapper.h"
+#include "quipper_string.h"
+#include "perf_utils.h"
+
+namespace quipper {
+
+namespace {
+
+struct EventAndTime {
+  ParsedEvent* event;
+  uint64_t time;
+};
+
+// Returns true if |e1| has an earlier timestamp than |e2|.  The args are const
+// pointers instead of references because of the way this function is used when
+// calling std::stable_sort.
+bool CompareParsedEventTimes(const std::unique_ptr<EventAndTime>& e1,
+                             const std::unique_ptr<EventAndTime>& e2) {
+  return (e1->time < e2->time);
+}
+
+// Kernel MMAP entry pid appears as -1
+const uint32_t kKernelPid = UINT32_MAX;
+
+// Name and ID of the kernel swapper process.
+const char kSwapperCommandName[] = "swapper";
+const uint32_t kSwapperPid = 0;
+
+bool IsNullBranchStackEntry(const struct branch_entry& entry) {
+  return (!entry.from && !entry.to);
+}
+
+}  // namespace
+
+PerfParser::PerfParser()
+    : kernel_mapper_(new AddressMapper)
+{}
+
+PerfParser::~PerfParser() {}
+
+PerfParser::PerfParser(const PerfParser::Options& options) {
+  options_ = options;
+}
+
+void PerfParser::set_options(const PerfParser::Options& options) {
+  options_ = options;
+}
+
+bool PerfParser::ParseRawEvents() {
+  process_mappers_.clear();
+  parsed_events_.resize(events_.size());
+  for (size_t i = 0; i < events_.size(); ++i) {
+    ParsedEvent& parsed_event = parsed_events_[i];
+    parsed_event.raw_event = events_[i].get();
+  }
+  MaybeSortParsedEvents();
+  if (!ProcessEvents()) {
+    return false;
+  }
+
+  if (!options_.discard_unused_events)
+    return true;
+
+  // Some MMAP/MMAP2 events' mapped regions will not have any samples. These
+  // MMAP/MMAP2 events should be dropped. |parsed_events_| should be
+  // reconstructed without these events.
+  size_t write_index = 0;
+  size_t read_index;
+  for (read_index = 0; read_index < parsed_events_.size(); ++read_index) {
+    const ParsedEvent& event = parsed_events_[read_index];
+    if ((event.raw_event->header.type == PERF_RECORD_MMAP ||
+         event.raw_event->header.type == PERF_RECORD_MMAP2) &&
+        event.num_samples_in_mmap_region == 0) {
+      continue;
+    }
+    if (read_index != write_index)
+      parsed_events_[write_index] = event;
+    ++write_index;
+  }
+  CHECK_LE(write_index, parsed_events_.size());
+  parsed_events_.resize(write_index);
+
+  // Now regenerate the sorted event list again.  These are pointers to events
+  // so they must be regenerated after a resize() of the ParsedEvent vector.
+  MaybeSortParsedEvents();
+
+  return true;
+}
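
The drop-unused-MMAP loop above compacts parsed_events_ in place: surviving entries are copied forward over discarded ones and the vector is then resized. A standalone sketch of the same idiom (plain ints stand in for ParsedEvent):

// Sketch: in-place compaction with separate read and write indices.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> events = {3, 0, 7, 0, 5};   // 0 == "MMAP with no samples"
  size_t write_index = 0;
  for (size_t read_index = 0; read_index < events.size(); ++read_index) {
    if (events[read_index] == 0)
      continue;                                // skip unused entries
    if (read_index != write_index)
      events[write_index] = events[read_index];
    ++write_index;
  }
  events.resize(write_index);                  // now {3, 7, 5}
  std::printf("kept %zu events\n", events.size());
  return 0;
}
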
+
+void PerfParser::MaybeSortParsedEvents() {
+  if (!(sample_type_ & PERF_SAMPLE_TIME)) {
+    parsed_events_sorted_by_time_.resize(parsed_events_.size());
+    for (size_t i = 0; i < parsed_events_.size(); ++i) {
+      parsed_events_sorted_by_time_[i] = &parsed_events_[i];
+    }
+    return;
+  }
+  std::vector<std::unique_ptr<EventAndTime>> events_and_times;
+  events_and_times.resize(parsed_events_.size());
+  for (size_t i = 0; i < parsed_events_.size(); ++i) {
+    std::unique_ptr<EventAndTime> event_and_time(new EventAndTime);
+
+    // Store the timestamp and event pointer in an array.
+    event_and_time->event = &parsed_events_[i];
+
+    struct perf_sample sample_info;
+    PerfSampleCustodian custodian(sample_info);
+    CHECK(ReadPerfSampleInfo(*parsed_events_[i].raw_event, &sample_info));
+    event_and_time->time = sample_info.time;
+
+    events_and_times[i] = std::move(event_and_time);
+  }
+  // Sort the events based on timestamp, and then populate the sorted event
+  // vector in sorted order.
+  std::stable_sort(events_and_times.begin(), events_and_times.end(),
+                   CompareParsedEventTimes);
+
+  parsed_events_sorted_by_time_.resize(events_and_times.size());
+  for (unsigned int i = 0; i < events_and_times.size(); ++i) {
+    parsed_events_sorted_by_time_[i] = events_and_times[i]->event;
+  }
+}
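
MaybeSortParsedEvents() uses std::stable_sort so that events with equal timestamps keep their original file order. A simplified sketch of that sort step (toy EventAndTime type, not the parser's):

// Sketch: stable sort of unique_ptr-owned events by timestamp; ties keep order.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

struct EventAndTime { int index; uint64_t time; };

int main() {
  std::vector<std::unique_ptr<EventAndTime>> events;
  events.emplace_back(new EventAndTime{0, 200});
  events.emplace_back(new EventAndTime{1, 100});
  events.emplace_back(new EventAndTime{2, 100});   // ties with index 1

  std::stable_sort(events.begin(), events.end(),
                   [](const std::unique_ptr<EventAndTime>& a,
                      const std::unique_ptr<EventAndTime>& b) {
                     return a->time < b->time;
                   });
  for (const auto& e : events)
    std::printf("index %d, time %llu\n", e->index,
                static_cast<unsigned long long>(e->time));   // indices 1, 2, 0
  return 0;
}
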
+
+bool PerfParser::ProcessEvents() {
+  memset(&stats_, 0, sizeof(stats_));
+
+  stats_.did_remap = false;   // Explicitly clear the remap flag.
+
+  // Pid 0 is called the swapper process. Even though perf does not record a
+  // COMM event for pid 0, we act like we did receive a COMM event for it. Perf
+  // does this itself, example:
+  //   http://lxr.free-electrons.com/source/tools/perf/util/session.c#L1120
+  commands_.insert(kSwapperCommandName);
+  pidtid_to_comm_map_[std::make_pair(kSwapperPid, kSwapperPid)] =
+      &(*commands_.find(kSwapperCommandName));
+
+  // NB: Not necessarily actually sorted by time.
+  for (unsigned int i = 0; i < parsed_events_sorted_by_time_.size(); ++i) {
+    ParsedEvent& parsed_event = *parsed_events_sorted_by_time_[i];
+    event_t& event = *parsed_event.raw_event;
+    switch (event.header.type) {
+      case PERF_RECORD_SAMPLE:
+        // SAMPLE doesn't have any fields to log at a fixed,
+        // previously-endian-swapped location. This used to log ip.
+        VLOG(1) << "SAMPLE";
+        ++stats_.num_sample_events;
+
+        if (MapSampleEvent(&parsed_event)) {
+          ++stats_.num_sample_events_mapped;
+        }
+        break;
+      case PERF_RECORD_MMAP: {
+        VLOG(1) << "MMAP: " << event.mmap.filename;
+        ++stats_.num_mmap_events;
+        // Use the array index of the current mmap event as a unique identifier.
+        CHECK(MapMmapEvent(&event.mmap, i)) << "Unable to map MMAP event!";
+        // No samples in this MMAP region yet, hopefully.
+        parsed_event.num_samples_in_mmap_region = 0;
+        DSOInfo dso_info;
+        // TODO(sque): Add Build ID as well.
+        dso_info.name = event.mmap.filename;
+        dso_set_.insert(dso_info);
+        break;
+      }
+      case PERF_RECORD_MMAP2: {
+        VLOG(1) << "MMAP2: " << event.mmap2.filename;
+        ++stats_.num_mmap_events;
+        // Use the array index of the current mmap event as a unique identifier.
+        CHECK(MapMmapEvent(&event.mmap2, i)) << "Unable to map MMAP2 event!";
+        // No samples in this MMAP region yet, hopefully.
+        parsed_event.num_samples_in_mmap_region = 0;
+        DSOInfo dso_info;
+        // TODO(sque): Add Build ID as well.
+        dso_info.name = event.mmap2.filename;
+        dso_set_.insert(dso_info);
+        break;
+      }
+      case PERF_RECORD_FORK:
+        VLOG(1) << "FORK: " << event.fork.ppid << ":" << event.fork.ptid
+                << " -> " << event.fork.pid << ":" << event.fork.tid;
+        ++stats_.num_fork_events;
+        CHECK(MapForkEvent(event.fork)) << "Unable to map FORK event!";
+        break;
+      case PERF_RECORD_EXIT:
+        // EXIT events have the same structure as FORK events.
+        VLOG(1) << "EXIT: " << event.fork.ppid << ":" << event.fork.ptid;
+        ++stats_.num_exit_events;
+        break;
+      case PERF_RECORD_COMM:
+        VLOG(1) << "COMM: " << event.comm.pid << ":" << event.comm.tid << ": "
+                << event.comm.comm;
+        ++stats_.num_comm_events;
+        CHECK(MapCommEvent(event.comm));
+        commands_.insert(event.comm.comm);
+        pidtid_to_comm_map_[std::make_pair(event.comm.pid, event.comm.tid)] =
+            &(*commands_.find(event.comm.comm));
+        break;
+      case PERF_RECORD_LOST:
+      case PERF_RECORD_THROTTLE:
+      case PERF_RECORD_UNTHROTTLE:
+      case PERF_RECORD_READ:
+      case PERF_RECORD_MAX:
+        VLOG(1) << "Parsed event type: " << event.header.type
+                << ". Doing nothing.";
+        break;
+      case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
+      case SIMPLE_PERF_RECORD_DSO:
+      case SIMPLE_PERF_RECORD_SYMBOL:
+      case SIMPLE_PERF_RECORD_SPLIT:
+      case SIMPLE_PERF_RECORD_SPLIT_END:
+        break;
+      default:
+        LOG(ERROR) << "Unknown event type: " << event.header.type;
+        return false;
+    }
+  }
+
+  // Print stats collected from parsing.
+  DLOG(INFO) << "Parser processed: "
+             << stats_.num_mmap_events << " MMAP/MMAP2 events, "
+             << stats_.num_comm_events << " COMM events, "
+             << stats_.num_fork_events << " FORK events, "
+             << stats_.num_exit_events << " EXIT events, "
+             << stats_.num_sample_events << " SAMPLE events, "
+             << stats_.num_sample_events_mapped << " of these were mapped";
+
+  float sample_mapping_percentage =
+      static_cast<float>(stats_.num_sample_events_mapped) /
+      stats_.num_sample_events * 100.;
+  float threshold = options_.sample_mapping_percentage_threshold;
+  if (sample_mapping_percentage < threshold) {
+    LOG(WARNING) << "Mapped " << static_cast<int>(sample_mapping_percentage)
+                 << "% of samples, expected at least "
+                 << static_cast<int>(threshold) << "%";
+    return false;
+  }
+  stats_.did_remap = options_.do_remap;
+  return true;
+}
+
+bool PerfParser::MapSampleEvent(ParsedEvent* parsed_event) {
+  bool mapping_failed = false;
+
+  // Find the associated command.
+  if (!(sample_type_ & PERF_SAMPLE_IP && sample_type_ & PERF_SAMPLE_TID))
+    return false;
+  perf_sample sample_info;
+  PerfSampleCustodian custodian(sample_info);
+  if (!ReadPerfSampleInfo(*parsed_event->raw_event, &sample_info))
+    return false;
+  PidTid pidtid = std::make_pair(sample_info.pid, sample_info.tid);
+  const auto comm_iter = pidtid_to_comm_map_.find(pidtid);
+  if (comm_iter != pidtid_to_comm_map_.end()) {
+    parsed_event->set_command(comm_iter->second);
+  }
+
+  const uint64_t unmapped_event_ip = sample_info.ip;
+
+  // Map the event IP itself.
+  if (!MapIPAndPidAndGetNameAndOffset(sample_info.ip,
+                                      sample_info.pid,
+                                      &sample_info.ip,
+                                      &parsed_event->dso_and_offset)) {
+    mapping_failed = true;
+  }
+
+  if (sample_info.callchain &&
+      !MapCallchain(sample_info.ip,
+                    sample_info.pid,
+                    unmapped_event_ip,
+                    sample_info.callchain,
+                    parsed_event)) {
+    mapping_failed = true;
+  }
+
+  if (sample_info.branch_stack &&
+      !MapBranchStack(sample_info.pid,
+                      sample_info.branch_stack,
+                      parsed_event)) {
+    mapping_failed = true;
+  }
+
+  if (options_.do_remap) {
+    // Write the remapped data back to the raw event regardless of
+    // whether it was entirely successfully remapped.  A single failed
+    // remap should not invalidate all the other remapped entries.
+    if (!WritePerfSampleInfo(sample_info, parsed_event->raw_event)) {
+      LOG(ERROR) << "Failed to write back remapped sample info.";
+      return false;
+    }
+  }
+
+  return !mapping_failed;
+}
+
+bool PerfParser::MapCallchain(const uint64_t ip,
+                              const uint32_t pid,
+                              const uint64_t original_event_addr,
+                              struct ip_callchain* callchain,
+                              ParsedEvent* parsed_event) {
+  if (!callchain) {
+    LOG(ERROR) << "NULL call stack data.";
+    return false;
+  }
+
+  bool mapping_failed = false;
+
+  // If the callchain's length is 0, there is no work to do.
+  if (callchain->nr == 0)
+    return true;
+
+  // Keeps track of whether the current entry is kernel or user.
+  parsed_event->callchain.resize(callchain->nr);
+  int num_entries_mapped = 0;
+  for (unsigned int j = 0; j < callchain->nr; ++j) {
+    uint64_t entry = callchain->ips[j];
+    // When a callchain context entry is found, do not attempt to symbolize it.
+    if (entry >= PERF_CONTEXT_MAX) {
+      continue;
+    }
+    // The sample address has already been mapped so no need to map it.
+    if (entry == original_event_addr) {
+      callchain->ips[j] = ip;
+      continue;
+    }
+    if (!MapIPAndPidAndGetNameAndOffset(
+            entry,
+            pid,
+            &callchain->ips[j],
+            &parsed_event->callchain[num_entries_mapped++])) {
+      mapping_failed = true;
+    }
+  }
+  // Not all the entries were mapped.  Trim |parsed_event->callchain| to
+  // remove unused entries at the end.
+  parsed_event->callchain.resize(num_entries_mapped);
+
+  return !mapping_failed;
+}
+
+bool PerfParser::MapBranchStack(const uint32_t pid,
+                                struct branch_stack* branch_stack,
+                                ParsedEvent* parsed_event) {
+  if (!branch_stack) {
+    LOG(ERROR) << "NULL branch stack data.";
+    return false;
+  }
+
+  // First, trim the branch stack to remove trailing null entries.
+  size_t trimmed_size = 0;
+  for (size_t i = 0; i < branch_stack->nr; ++i) {
+    // Count the number of non-null entries before the first null entry.
+    if (IsNullBranchStackEntry(branch_stack->entries[i])) {
+      break;
+    }
+    ++trimmed_size;
+  }
+
+  // If a null entry was found, make sure all subsequent null entries are NULL
+  // as well.
+  for (size_t i = trimmed_size; i < branch_stack->nr; ++i) {
+    const struct branch_entry& entry = branch_stack->entries[i];
+    if (!IsNullBranchStackEntry(entry)) {
+      LOG(ERROR) << "Non-null branch stack entry found after null entry: "
+                 << reinterpret_cast<void*>(entry.from) << " -> "
+                 << reinterpret_cast<void*>(entry.to);
+      return false;
+    }
+  }
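+  // For example (illustrative), if |branch_stack->nr| is 16 but only the
+  // first three entries are non-null, |trimmed_size| is 3 and only those
+  // entries are mapped below.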
+
+  // Map branch stack addresses.
+  parsed_event->branch_stack.resize(trimmed_size);
+  for (unsigned int i = 0; i < trimmed_size; ++i) {
+    struct branch_entry& entry = branch_stack->entries[i];
+    ParsedEvent::BranchEntry& parsed_entry = parsed_event->branch_stack[i];
+    if (!MapIPAndPidAndGetNameAndOffset(entry.from,
+                                        pid,
+                                        &entry.from,
+                                        &parsed_entry.from)) {
+      return false;
+    }
+    if (!MapIPAndPidAndGetNameAndOffset(entry.to,
+                                        pid,
+                                        &entry.to,
+                                        &parsed_entry.to)) {
+      return false;
+    }
+    parsed_entry.predicted = entry.flags.predicted;
+    // Either predicted or mispredicted, not both. But don't use a CHECK here,
+    // just exit gracefully because it's a minor issue.
+    if (entry.flags.predicted == entry.flags.mispred) {
+      LOG(ERROR) << "Branch stack entry predicted and mispred flags "
+                 << "both have value " << entry.flags.mispred;
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool PerfParser::MapIPAndPidAndGetNameAndOffset(
+    uint64_t ip,
+    uint32_t pid,
+    uint64_t* new_ip,
+    ParsedEvent::DSOAndOffset* dso_and_offset) {
+
+  // Attempt to find the synthetic address of the IP sample in this order:
+  // 1. Address space of its own process.
+  // 2. Address space of the kernel.
+
+  uint64_t mapped_addr = 0;
+
+  // Sometimes the first event we see for a process is a SAMPLE event, before
+  // any address mapper has been created for it (e.g. for pid 0), so create
+  // one here if needed.
+  AddressMapper* mapper = GetOrCreateProcessMapper(pid).first;
+  bool mapped = mapper->GetMappedAddress(ip, &mapped_addr);
+  if (!mapped) {
+    mapper = kernel_mapper_.get();
+    mapped = mapper->GetMappedAddress(ip, &mapped_addr);
+  }
+
+  // TODO(asharif): What should we do when we cannot map a SAMPLE event?
+  if (mapped) {
+    if (dso_and_offset) {
+      uint64_t id = kuint64max;
+      CHECK(mapper->GetMappedIDAndOffset(ip, &id, &dso_and_offset->offset_));
+      // Make sure the ID points to a valid event.
+      CHECK_LT(id, parsed_events_sorted_by_time_.size());
+      ParsedEvent* parsed_event = parsed_events_sorted_by_time_[id];
+      const event_t* raw_event = parsed_event->raw_event;
+
+      DSOInfo dso_info;
+      if (raw_event->header.type == PERF_RECORD_MMAP) {
+        dso_info.name = raw_event->mmap.filename;
+      } else if (raw_event->header.type == PERF_RECORD_MMAP2) {
+        dso_info.name = raw_event->mmap2.filename;
+      } else {
+        LOG(FATAL) << "Expected MMAP or MMAP2 event";
+      }
+
+      // Find the mmap DSO filename in the set of known DSO names.
+      // TODO(sque): take build IDs into account.
+      std::set<DSOInfo>::const_iterator dso_iter = dso_set_.find(dso_info);
+      CHECK(dso_iter != dso_set_.end());
+      dso_and_offset->dso_info_ = &(*dso_iter);
+
+      ++parsed_event->num_samples_in_mmap_region;
+    }
+    if (options_.do_remap)
+      *new_ip = mapped_addr;
+  }
+  return mapped;
+}
+
+bool PerfParser::MapMmapEvent(uint64_t id,
+                              uint32_t pid,
+                              uint64_t* p_start,
+                              uint64_t* p_len,
+                              uint64_t* p_pgoff)
+{
+  // We need to hide only the real kernel addresses.  However, to make things
+  // more secure, and make the mapping idempotent, we should remap all
+  // addresses, both kernel and non-kernel.
+  AddressMapper* mapper =
+      (pid == kKernelPid ? kernel_mapper_.get() :
+       GetOrCreateProcessMapper(pid).first);
+
+  uint64_t start = *p_start;
+  uint64_t len = *p_len;
+  uint64_t pgoff = *p_pgoff;
+
+  // |id| == 0 corresponds to the kernel mmap. We have several cases here:
+  //
+  // For ARM and x86, in sudo mode, pgoff == start, example:
+  // start=0x80008200
+  // pgoff=0x80008200
+  // len  =0xfffffff7ff7dff
+  //
+  // For x86-64, in sudo mode, pgoff is between start and start + len. SAMPLE
+  // events lie between pgoff and pgoff + length of the real kernel binary,
+  // example:
+  // start=0x3bc00000
+  // pgoff=0xffffffffbcc00198
+  // len  =0xffffffff843fffff
+  // SAMPLE events will be found after pgoff. For kernels with ASLR, pgoff will
+  // be something only visible to the root user, and will be randomized at
+  // startup. With |remap| set to true, we should hide pgoff in this case. So we
+  // normalize all SAMPLE events relative to pgoff.
+  //
+  // For non-sudo mode, the kernel will be mapped from 0 to the pointer limit,
+  // example:
+  // start=0x0
+  // pgoff=0x0
+  // len  =0xffffffff
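+  //
+  // Worked example of the x86-64 normalization below, using the hypothetical
+  // numbers quoted above (not values from a real trace):
+  //   start=0x3bc00000, pgoff=0xffffffffbcc00198, len=0xffffffff843fffff
+  // Since start < pgoff < start + len, the event becomes:
+  //   len   = len + start - pgoff = 0x33ffe67 (much closer to the real
+  //           kernel text size)
+  //   start = pgoff = 0xffffffffbcc00198
+  // and pgoff is then zeroed so the randomized offset is never exposed.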
+  if (id == 0) {
+    // If pgoff lies between start and start + len, we normalize the event by
+    // setting start to be pgoff, just as it is for ARM and x86. We also set
+    // len to a much smaller number (closer to the real length of the kernel
+    // binary)
+    // because SAMPLEs are actually only seen between |event->pgoff| and
+    // |event->pgoff + kernel text size|.
+    if (pgoff > start && pgoff < start + len) {
+      len = len + start - pgoff;
+      start = pgoff;
+    }
+    // For kernels with ASLR, pgoff is critical information that should not be
+    // revealed when |remap| is true.
+    pgoff = 0;
+  }
+
+  if (!mapper->MapWithID(start, len, id, pgoff, true)) {
+    mapper->DumpToLog();
+    return false;
+  }
+
+  if (options_.do_remap) {
+    uint64_t mapped_addr;
+    CHECK(mapper->GetMappedAddress(start, &mapped_addr));
+    *p_start = mapped_addr;
+    *p_len = len;
+    *p_pgoff = pgoff;
+  }
+  return true;
+}
+
+std::pair<AddressMapper*, bool> PerfParser::GetOrCreateProcessMapper(
+    uint32_t pid, uint32_t *ppid) {
+  const auto& search = process_mappers_.find(pid);
+  if (search != process_mappers_.end()) {
+    return std::make_pair(search->second.get(), false);
+  }
+
+  std::unique_ptr<AddressMapper> mapper;
+  const auto& parent_mapper =
+      (ppid ? process_mappers_.find(*ppid) : process_mappers_.end());
+  if (parent_mapper != process_mappers_.end())
+    mapper.reset(new AddressMapper(*parent_mapper->second));
+  else
+    mapper.reset(new AddressMapper());
+
+  const auto inserted =
+      process_mappers_.insert(search, std::make_pair(pid, std::move(mapper)));
+  return std::make_pair(inserted->second.get(), true);
+}
+
+bool PerfParser::MapCommEvent(const struct comm_event& event) {
+  GetOrCreateProcessMapper(event.pid);
+  return true;
+}
+
+bool PerfParser::MapForkEvent(const struct fork_event& event) {
+  PidTid parent = std::make_pair(event.ppid, event.ptid);
+  PidTid child = std::make_pair(event.pid, event.tid);
+  if (parent != child &&
+      pidtid_to_comm_map_.find(parent) != pidtid_to_comm_map_.end()) {
+    pidtid_to_comm_map_[child] = pidtid_to_comm_map_[parent];
+  }
+
+  const uint32_t pid = event.pid;
+
+  // If the parent and child pids are the same, this is just a new thread
+  // within the same process, so don't do anything.
+  if (event.ppid == pid)
+    return true;
+
+  uint32_t ppid = event.ppid;
+  if (!GetOrCreateProcessMapper(pid, &ppid).second) {
+    DLOG(INFO) << "Found an existing process mapper with pid: " << pid;
+  }
+
+  return true;
+}
+
+}  // namespace quipper
diff --git a/perfprofd/quipper/perf_parser.h b/perfprofd/quipper/perf_parser.h
new file mode 100644
index 0000000..bb66de2
--- /dev/null
+++ b/perfprofd/quipper/perf_parser.h
@@ -0,0 +1,249 @@
+// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROMIUMOS_WIDE_PROFILING_PERF_PARSER_H_
+#define CHROMIUMOS_WIDE_PROFILING_PERF_PARSER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+
+#include "perf_reader.h"
+#include "perf_utils.h"
+
+namespace quipper {
+
+class AddressMapper;
+
+// A struct containing all relevant info for a mapped DSO, independent of any
+// samples.
+struct DSOInfo {
+  string name;
+  string build_id;
+
+  // Comparator that allows this to be stored in a STL set.
+  bool operator<(const DSOInfo& other) const {
+    if (name == other.name)
+      return build_id < other.build_id;
+    return name < other.name;
+  }
+};
+
+struct ParsedEvent {
+  // TODO(sque): Turn this struct into a class to privatize member variables.
+  ParsedEvent() : command_(NULL) {}
+
+  // Stores address of an event_t owned by the |PerfReader::events_| vector.
+  event_t* raw_event;
+
+  // For mmap events, use this to count the number of samples that are in this
+  // region.
+  uint32_t num_samples_in_mmap_region;
+
+  // Command associated with this sample.
+  const string* command_;
+
+  // Accessor for command string.
+  const string command() const {
+    if (command_)
+      return *command_;
+    return string();
+  }
+
+  void set_command(const string* command) {
+    command_ = command;
+  }
+
+  // A struct that contains a DSO + offset pair.
+  struct DSOAndOffset {
+    const DSOInfo* dso_info_;
+    uint64_t offset_;
+
+    // Accessor methods.
+    const string dso_name() const {
+      if (dso_info_)
+        return dso_info_->name;
+      return string();
+    }
+    const string build_id() const {
+      if (dso_info_)
+        return dso_info_->build_id;
+      return string();
+    }
+    uint64_t offset() const {
+      return offset_;
+    }
+
+    DSOAndOffset() : dso_info_(NULL),
+                     offset_(0) {}
+  } dso_and_offset;
+
+  // DSO+offset info for callchain.
+  std::vector<DSOAndOffset> callchain;
+
+  // DSO + offset info for branch stack entries.
+  struct BranchEntry {
+    bool predicted;
+    DSOAndOffset from;
+    DSOAndOffset to;
+  };
+  std::vector<BranchEntry> branch_stack;
+};
+
+struct PerfEventStats {
+  // Number of each type of event.
+  uint32_t num_sample_events;
+  uint32_t num_mmap_events;
+  uint32_t num_comm_events;
+  uint32_t num_fork_events;
+  uint32_t num_exit_events;
+
+  // Number of sample events that were successfully mapped using the address
+  // mapper.  The mapping is recorded regardless of whether the address in the
+  // perf sample event itself was assigned the remapped address.  The latter is
+  // indicated by |did_remap|.
+  uint32_t num_sample_events_mapped;
+
+  // Whether address remapping was enabled during event parsing.
+  bool did_remap;
+};
+
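+// Typical usage (a rough sketch; error handling and the exact call sites in
+// perfprofd may differ, and the input path below is hypothetical):
+//
+//   PerfParser::Options options;
+//   options.do_remap = true;
+//   PerfParser parser(options);
+//   if (parser.ReadFile("/data/local/tmp/perf.data") &&
+//       parser.ParseRawEvents()) {
+//     const PerfEventStats& stats = parser.stats();   // mapping statistics
+//     for (const ParsedEvent* event : parser.GetEventsSortedByTime()) {
+//       // Inspect event->dso_and_offset, event->callchain, etc.
+//     }
+//   }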
+class PerfParser : public PerfReader {
+ public:
+  PerfParser();
+  ~PerfParser();
+
+  struct Options {
+    // For synthetic address mapping.
+    bool do_remap = false;
+    // Set this flag to discard non-sample events that don't have any
+    // associated sample events, e.g. MMAP regions with no samples in them.
+    bool discard_unused_events = false;
+    // When mapping perf sample events, at least this percentage of them must be
+    // successfully mapped in order for ProcessEvents() to return true.
+    // By default, most samples must be properly mapped in order for sample
+    // mapping to be considered successful.
+    float sample_mapping_percentage_threshold = 95.0f;
+  };
+
+  // Constructor that takes in options at PerfParser creation time.
+  explicit PerfParser(const Options& options);
+
+  // Pass in a struct containing various options.
+  void set_options(const Options& options);
+
+  // Gets parsed event/sample info from raw event data.
+  bool ParseRawEvents();
+
+  const std::vector<ParsedEvent>& parsed_events() const {
+    return parsed_events_;
+  }
+
+  // Returns an array of pointers to |parsed_events_| sorted by sample time.
+  // The first time this is called, it will create the sorted array.
+  const std::vector<ParsedEvent*>& GetEventsSortedByTime() const {
+    return parsed_events_sorted_by_time_;
+  }
+
+  const PerfEventStats& stats() const {
+    return stats_;
+  }
+
+ protected:
+  // Defines a type for a pid:tid pair.
+  typedef std::pair<uint32_t, uint32_t> PidTid;
+
+  // Sort |parsed_events_| by time, storing the results in
+  // |parsed_events_sorted_by_time_|.
+  // Events cannot be sorted by time if PERF_SAMPLE_TIME is not set in
+  // attr.sample_type (PerfReader.sample_type_). In that case,
+  // |parsed_events_sorted_by_time_| is not actually sorted, but has the same
+  // order as |parsed_events_|.
+  void MaybeSortParsedEvents();
+
+  // Used for processing events.  e.g. remapping with synthetic addresses.
+  bool ProcessEvents();
+  template <typename MMapEventT>
+  bool MapMmapEvent(MMapEventT* event, uint64_t id) {
+    return MapMmapEvent(id,
+                        event->pid,
+                        &event->start,
+                        &event->len,
+                        &event->pgoff);
+  }
+  bool MapMmapEvent(uint64_t id,
+                    uint32_t pid,
+                    uint64_t* p_start,
+                    uint64_t* p_len,
+                    uint64_t* p_pgoff);
+  bool MapForkEvent(const struct fork_event& event);
+  bool MapCommEvent(const struct comm_event& event);
+
+  // Does a sample event remap and then returns DSO name and offset of sample.
+  bool MapSampleEvent(ParsedEvent* parsed_event);
+
+  std::vector<ParsedEvent> parsed_events_;
+  // See MaybeSortParsedEvents for why this might not actually be sorted by
+  // time.
+  std::vector<ParsedEvent*> parsed_events_sorted_by_time_;
+
+  Options options_;   // Store all option flags as one struct.
+
+  // Maps pid/tid to commands.
+  std::map<PidTid, const string*> pidtid_to_comm_map_;
+
+  // A set to store the actual command strings.
+  std::set<string> commands_;
+
+  PerfEventStats stats_;
+
+  // A set of unique DSOs that may be referenced by multiple events.
+  std::set<DSOInfo> dso_set_;
+
+ private:
+  // Calls MapIPAndPidAndGetNameAndOffset() on the callchain of a sample event.
+  bool MapCallchain(const uint64_t ip,
+                    const uint32_t pid,
+                    uint64_t original_event_addr,
+                    struct ip_callchain* callchain,
+                    ParsedEvent* parsed_event);
+
+  // Trims the branch stack for null entries and calls
+  // MapIPAndPidAndGetNameAndOffset() on each entry.
+  bool MapBranchStack(const uint32_t pid,
+                      struct branch_stack* branch_stack,
+                      ParsedEvent* parsed_event);
+
+  // This maps a sample event and returns the mapped address, DSO name, and
+  // offset within the DSO.  This is a private function because the API might
+  // change in the future, and we don't want derived classes to be stuck with an
+  // obsolete API.
+  bool MapIPAndPidAndGetNameAndOffset(
+      uint64_t ip,
+      uint32_t pid,
+      uint64_t* new_ip,
+      ParsedEvent::DSOAndOffset* dso_and_offset);
+
+  // Create a process mapper for a process. Optionally pass in a parent pid
+  // |ppid| from which to copy mappings.
+  // Returns (mapper, true) if a new AddressMapper was created, and
+  // (mapper, false) if there is an existing mapper.
+  std::pair<AddressMapper*, bool> GetOrCreateProcessMapper(uint32_t pid,
+                                                           uint32_t *ppid = NULL);
+
+  std::unique_ptr<AddressMapper> kernel_mapper_;
+  std::map<uint32_t, std::unique_ptr<AddressMapper>> process_mappers_;
+
+  DISALLOW_COPY_AND_ASSIGN(PerfParser);
+};
+
+}  // namespace quipper
+
+#endif  // CHROMIUMOS_WIDE_PROFILING_PERF_PARSER_H_
diff --git a/perfprofd/quipper/perf_reader.cc b/perfprofd/quipper/perf_reader.cc
new file mode 100644
index 0000000..48497d0
--- /dev/null
+++ b/perfprofd/quipper/perf_reader.cc
@@ -0,0 +1,1709 @@
+// Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "perf_reader.h"
+
+#include <byteswap.h>
+#include <limits.h>
+
+#include <bitset>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <vector>
+
+#define LOG_TAG "perf_reader"
+
+#include "base/logging.h"
+
+#include "quipper_string.h"
+#include "perf_utils.h"
+
+namespace quipper {
+
+struct BufferWithSize {
+  char* ptr;
+  size_t size;
+};
+
+// If the buffer is read-only, it is not sufficient to mark the previous struct
+// as const, as this only means that the pointer cannot be changed, and says
+// nothing about the contents of the buffer.  So, we need another struct.
+struct ConstBufferWithSize {
+  const char* ptr;
+  size_t size;
+};
+
+namespace {
+
+// The type of the count of string data entries, found in the command line
+// metadata in the perf data file.
+typedef u32 num_string_data_type;
+
+// Types of the event desc fields that are not found in other structs.
+typedef u32 event_desc_num_events;
+typedef u32 event_desc_attr_size;
+typedef u32 event_desc_num_unique_ids;
+
+// The type of the number of nodes field in NUMA topology.
+typedef u32 numa_topology_num_nodes_type;
+
+// A mask that is applied to metadata_mask_ in order to get a mask for
+// only the metadata supported by quipper.
+const uint32_t kSupportedMetadataMask =
+    1 << HEADER_TRACING_DATA |
+    1 << HEADER_BUILD_ID |
+    1 << HEADER_HOSTNAME |
+    1 << HEADER_OSRELEASE |
+    1 << HEADER_VERSION |
+    1 << HEADER_ARCH |
+    1 << HEADER_NRCPUS |
+    1 << HEADER_CPUDESC |
+    1 << HEADER_CPUID |
+    1 << HEADER_TOTAL_MEM |
+    1 << HEADER_CMDLINE |
+    1 << HEADER_EVENT_DESC |
+    1 << HEADER_CPU_TOPOLOGY |
+    1 << HEADER_NUMA_TOPOLOGY |
+    1 << HEADER_BRANCH_STACK;
+
+// By default, the build ID event has PID = -1.
+const uint32_t kDefaultBuildIDEventPid = static_cast<uint32_t>(-1);
+
+template <class T>
+void ByteSwap(T* input) {
+  switch (sizeof(T)) {
+  case sizeof(uint8_t):
+    LOG(WARNING) << "Attempting to byte swap on a single byte.";
+    break;
+  case sizeof(uint16_t):
+    *input = bswap_16(*input);
+    break;
+  case sizeof(uint32_t):
+    *input = bswap_32(*input);
+    break;
+  case sizeof(uint64_t):
+    *input = bswap_64(*input);
+    break;
+  default:
+    LOG(FATAL) << "Invalid size for byte swap: " << sizeof(T) << " bytes";
+    break;
+  }
+}
+
+u64 MaybeSwap(u64 value, bool swap) {
+  if (swap)
+    return bswap_64(value);
+  return value;
+}
+
+u32 MaybeSwap(u32 value, bool swap) {
+  if (swap)
+    return bswap_32(value);
+  return value;
+}
+
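+// Reverses the bit order of a byte, e.g. ReverseByte(0x35) == 0xac
+// (00110101 -> 10101100).  Used by SwapBitfieldOfBits below.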
+u8 ReverseByte(u8 x) {
+  x = (x & 0xf0) >> 4 | (x & 0x0f) << 4;  // exchange nibbles
+  x = (x & 0xcc) >> 2 | (x & 0x33) << 2;  // exchange pairs
+  x = (x & 0xaa) >> 1 | (x & 0x55) << 1;  // exchange neighbors
+  return x;
+}
+
+// If field points to the start of a bitfield padded to len bytes, this
+// performs an endian swap of the bitfield, assuming the compiler that produced
+// it conforms to the same ABI (bitfield layout is not completely specified by
+// the language).
+void SwapBitfieldOfBits(u8* field, size_t len) {
+  for (size_t i = 0; i < len; i++) {
+    field[i] = ReverseByte(field[i]);
+  }
+}
+
+// The code currently assumes that the compiler will not add any padding to the
+// various structs.  These CHECKs make sure that this is true.
+void CheckNoEventHeaderPadding() {
+  perf_event_header header;
+  CHECK_EQ(sizeof(header),
+           sizeof(header.type) + sizeof(header.misc) + sizeof(header.size));
+}
+
+void CheckNoPerfEventAttrPadding() {
+  perf_event_attr attr;
+  CHECK_EQ(sizeof(attr),
+           (reinterpret_cast<u64>(&attr.__reserved_2) -
+            reinterpret_cast<u64>(&attr)) +
+           sizeof(attr.__reserved_2));
+}
+
+void CheckNoEventTypePadding() {
+  perf_trace_event_type event_type;
+  CHECK_EQ(sizeof(event_type),
+           sizeof(event_type.event_id) + sizeof(event_type.name));
+}
+
+void CheckNoBuildIDEventPadding() {
+  build_id_event event;
+  CHECK_EQ(sizeof(event),
+           sizeof(event.header.type) + sizeof(event.header.misc) +
+           sizeof(event.header.size) + sizeof(event.pid) +
+           sizeof(event.build_id));
+}
+
+// Creates/updates a build id event with |build_id| and |filename|.
+// Passing "" to |build_id| or |filename| will leave the corresponding field
+// unchanged (in which case |event| must be non-null).
+// If |event| is null or is not large enough, a new event will be created.
+// In this case, if |event| is non-null, it will be freed.
+// Otherwise, updates the fields of the existing event.
+// |new_misc| indicates kernel vs user space, and is only used to fill in the
+// |header.misc| field of new events.
+// In either case, returns a pointer to the event containing the updated data,
+// or NULL in the case of a failure.
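+// Example (illustrative, with a hypothetical filename):
+//   CreateOrUpdateBuildID("", "/system/lib/libc.so", 0, event)
+// keeps |event|'s existing build id but renames it to "/system/lib/libc.so",
+// reallocating the event if the new filename does not fit in the old one.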
+build_id_event* CreateOrUpdateBuildID(const string& build_id,
+                                      const string& filename,
+                                      uint16_t new_misc,
+                                      build_id_event* event) {
+  // When creating an event from scratch, build id and filename must be present.
+  if (!event && (build_id.empty() || filename.empty()))
+    return NULL;
+  size_t new_len = GetUint64AlignedStringLength(
+      filename.empty() ? event->filename : filename);
+
+  // If event is null, or we don't have enough memory, allocate more memory, and
+  // switch the new pointer with the existing pointer.
+  size_t new_size = sizeof(*event) + new_len;
+  if (!event || new_size > event->header.size) {
+    build_id_event* new_event = CallocMemoryForBuildID(new_size);
+
+    if (event) {
+      // Copy over everything except the filename and free the event.
+      // It is guaranteed that we are changing the filename - otherwise, the old
+      // size and the new size would be equal.
+      *new_event = *event;
+      free(event);
+    } else {
+      // Fill in the fields appropriately.
+      new_event->header.type = HEADER_BUILD_ID;
+      new_event->header.misc = new_misc;
+      new_event->pid = kDefaultBuildIDEventPid;
+    }
+    event = new_event;
+  }
+
+  // Here, event is the pointer to the build_id_event that we are keeping.
+  // Update the event's size, build id, and filename.
+  if (!build_id.empty() &&
+      !StringToHex(build_id, event->build_id, arraysize(event->build_id))) {
+    free(event);
+    return NULL;
+  }
+
+  if (!filename.empty())
+    CHECK_GT(snprintf(event->filename, new_len, "%s", filename.c_str()), 0);
+
+  event->header.size = new_size;
+  return event;
+}
+
+// Reads |size| bytes from |buffer| into |dest| and advances |src_offset|.
+bool ReadDataFromBuffer(const ConstBufferWithSize& buffer,
+                        size_t size,
+                        const string& value_name,
+                        size_t* src_offset,
+                        void* dest) {
+  size_t end_offset = *src_offset + size / sizeof(*buffer.ptr);
+  if (buffer.size < end_offset) {
+    LOG(ERROR) << "Not enough bytes to read " << value_name
+               << ". Requested " << size << " bytes";
+    return false;
+  }
+  memcpy(dest, buffer.ptr + *src_offset, size);
+  *src_offset = end_offset;
+  return true;
+}
+
+// Reads a CStringWithLength from |buffer| into |dest|, and advances the offset.
+bool ReadStringFromBuffer(const ConstBufferWithSize& buffer,
+                          bool is_cross_endian,
+                          size_t* offset,
+                          CStringWithLength* dest) {
+  if (!ReadDataFromBuffer(buffer, sizeof(dest->len), "string length",
+                          offset, &dest->len)) {
+    return false;
+  }
+  if (is_cross_endian)
+    ByteSwap(&dest->len);
+
+  if (buffer.size < *offset + dest->len) {
+    LOG(ERROR) << "Not enough bytes to read string";
+    return false;
+  }
+  dest->str = string(buffer.ptr + *offset);
+  *offset += dest->len / sizeof(*buffer.ptr);
+  return true;
+}
+
+// Read the read_format counter info from perf data.  Corresponds to sample
+// format type PERF_SAMPLE_READ.
+const uint64_t* ReadReadInfo(const uint64_t* array,
+                           bool swap_bytes,
+                           uint64_t read_format,
+                           struct perf_sample* sample) {
+  if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+    sample->read.time_enabled = *array++;
+  if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+    sample->read.time_running = *array++;
+  if (read_format & PERF_FORMAT_ID)
+    sample->read.one.id = *array++;
+
+  if (swap_bytes) {
+    ByteSwap(&sample->read.time_enabled);
+    ByteSwap(&sample->read.time_running);
+    ByteSwap(&sample->read.one.id);
+  }
+
+  return array;
+}
+
+// Read call chain info from perf data.  Corresponds to sample format type
+// PERF_SAMPLE_CALLCHAIN.
+const uint64_t* ReadCallchain(const uint64_t* array,
+                            bool swap_bytes,
+                            struct perf_sample* sample) {
+  // Make sure there is no existing allocated memory in |sample->callchain|.
+  CHECK_EQ(static_cast<void*>(NULL), sample->callchain);
+
+  // The callgraph data consists of a uint64_t value |nr| followed by |nr|
+  // addresses.
+  uint64_t callchain_size = *array++;
+  if (swap_bytes)
+    ByteSwap(&callchain_size);
+  struct ip_callchain* callchain =
+      reinterpret_cast<struct ip_callchain*>(new uint64_t[callchain_size + 1]);
+  callchain->nr = callchain_size;
+  for (size_t i = 0; i < callchain_size; ++i) {
+    callchain->ips[i] = *array++;
+    if (swap_bytes)
+      ByteSwap(&callchain->ips[i]);
+  }
+  sample->callchain = callchain;
+
+  return array;
+}
+
+// Read raw info from perf data.  Corresponds to sample format type
+// PERF_SAMPLE_RAW.
+const uint64_t* ReadRawData(const uint64_t* array,
+                          bool swap_bytes,
+                          struct perf_sample* sample) {
+  // First read the size.
+  const uint32_t* ptr = reinterpret_cast<const uint32_t*>(array);
+  sample->raw_size = *ptr++;
+  if (swap_bytes)
+    ByteSwap(&sample->raw_size);
+
+  // Allocate space for and read the raw data bytes.
+  sample->raw_data = new uint8_t[sample->raw_size];
+  memcpy(sample->raw_data, ptr, sample->raw_size);
+
+  // Determine the bytes that were read, and align to the next 64 bits.
+  int bytes_read = AlignSize(sizeof(sample->raw_size) + sample->raw_size,
+                             sizeof(uint64_t));
+  array += bytes_read / sizeof(uint64_t);
+
+  return array;
+}
+
+// Read branch stack info from perf data.  Corresponds to sample format type
+// PERF_SAMPLE_BRANCH_STACK.
+const uint64_t* ReadBranchStack(const uint64_t* array,
+                              bool swap_bytes,
+                              struct perf_sample* sample) {
+  // Make sure there is no existing allocated memory in
+  // |sample->branch_stack|.
+  CHECK_EQ(static_cast<void*>(NULL), sample->branch_stack);
+
+  // The branch stack data consists of a uint64_t value |nr| followed by |nr|
+  // branch_entry structs.
+  uint64_t branch_stack_size = *array++;
+  if (swap_bytes)
+    ByteSwap(&branch_stack_size);
+  struct branch_stack* branch_stack =
+      reinterpret_cast<struct branch_stack*>(
+          new uint8_t[sizeof(uint64_t) +
+                    branch_stack_size * sizeof(struct branch_entry)]);
+  branch_stack->nr = branch_stack_size;
+  for (size_t i = 0; i < branch_stack_size; ++i) {
+    memcpy(&branch_stack->entries[i], array, sizeof(struct branch_entry));
+    array += sizeof(struct branch_entry) / sizeof(*array);
+    if (swap_bytes) {
+      ByteSwap(&branch_stack->entries[i].from);
+      ByteSwap(&branch_stack->entries[i].to);
+    }
+  }
+  sample->branch_stack = branch_stack;
+
+  return array;
+}
+
+size_t ReadPerfSampleFromData(const perf_event_type event_type,
+                              const uint64_t* array,
+                              const uint64_t sample_fields,
+                              const uint64_t read_format,
+                              bool swap_bytes,
+                              const perf_event_attr &attr0,
+                              size_t n_attrs,
+                              struct perf_sample* sample) {
+  const uint64_t* initial_array_ptr = array;
+
+  union {
+    uint32_t val32[sizeof(uint64_t) / sizeof(uint32_t)];
+    uint64_t val64;
+  };
+
+  // See structure for PERF_RECORD_SAMPLE in kernel/perf_event.h
+  // and compare sample_id when sample_id_all is set.
+
+  // NB: For sample_id, sample_fields has already been masked to the set
+  // of fields in that struct by GetSampleFieldsForEventType. That set
+  // of fields is mostly in the same order as PERF_RECORD_SAMPLE, with
+  // the exception of PERF_SAMPLE_IDENTIFIER.
+
+  // PERF_SAMPLE_IDENTIFIER is in a different location depending on
+  // if this is a SAMPLE event or the sample_id of another event.
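+  // For example (illustrative; assumes swap_bytes is false), if sample_fields
+  // is PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME, the layout of
+  // |array| for a PERF_RECORD_SAMPLE is:
+  //   array[0] = ip
+  //   array[1] = { pid, tid }  (two u32 values packed into one u64)
+  //   array[2] = time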
+  if (event_type == PERF_RECORD_SAMPLE) {
+    // { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
+    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
+      sample->id = MaybeSwap(*array++, swap_bytes);
+    }
+  }
+
+  // { u64                   ip;       } && PERF_SAMPLE_IP
+  if (sample_fields & PERF_SAMPLE_IP) {
+    sample->ip = MaybeSwap(*array++, swap_bytes);
+  }
+
+  // { u32                   pid, tid; } && PERF_SAMPLE_TID
+  if (sample_fields & PERF_SAMPLE_TID) {
+    val64 = *array++;
+    sample->pid = MaybeSwap(val32[0], swap_bytes);
+    sample->tid = MaybeSwap(val32[1], swap_bytes);
+  }
+
+  // { u64                   time;     } && PERF_SAMPLE_TIME
+  if (sample_fields & PERF_SAMPLE_TIME) {
+    sample->time = MaybeSwap(*array++, swap_bytes);
+  }
+
+  // { u64                   addr;     } && PERF_SAMPLE_ADDR
+  if (sample_fields & PERF_SAMPLE_ADDR) {
+    sample->addr = MaybeSwap(*array++, swap_bytes);
+  }
+
+  // { u64                   id;       } && PERF_SAMPLE_ID
+  if (sample_fields & PERF_SAMPLE_ID) {
+    sample->id = MaybeSwap(*array++, swap_bytes);
+  }
+
+  // { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
+  if (sample_fields & PERF_SAMPLE_STREAM_ID) {
+    sample->stream_id = MaybeSwap(*array++, swap_bytes);
+  }
+
+  // { u32                   cpu, res; } && PERF_SAMPLE_CPU
+  if (sample_fields & PERF_SAMPLE_CPU) {
+    val64 = *array++;
+    sample->cpu = MaybeSwap(val32[0], swap_bytes);
+    // sample->res = MaybeSwap(val32[1], swap_bytes);  // not implemented?
+  }
+
+  // This is the location of PERF_SAMPLE_IDENTIFIER in struct sample_id.
+  if (event_type != PERF_RECORD_SAMPLE) {
+    // { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
+    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
+      sample->id = MaybeSwap(*array++, swap_bytes);
+    }
+  }
+
+  //
+  // The remaining fields are only in PERF_RECORD_SAMPLE
+  //
+
+  // { u64                   period;   } && PERF_SAMPLE_PERIOD
+  if (sample_fields & PERF_SAMPLE_PERIOD) {
+    sample->period = MaybeSwap(*array++, swap_bytes);
+  }
+
+  // { struct read_format    values;   } && PERF_SAMPLE_READ
+  if (sample_fields & PERF_SAMPLE_READ) {
+    // TODO(cwp-team): support grouped read info.
+    if (read_format & PERF_FORMAT_GROUP)
+      return 0;
+    array = ReadReadInfo(array, swap_bytes, read_format, sample);
+  }
+
+  // { u64                   nr,
+  //   u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+  if (sample_fields & PERF_SAMPLE_CALLCHAIN) {
+    array = ReadCallchain(array, swap_bytes, sample);
+  }
+
+  // { u32                   size;
+  //   char                  data[size];}&& PERF_SAMPLE_RAW
+  if (sample_fields & PERF_SAMPLE_RAW) {
+    array = ReadRawData(array, swap_bytes, sample);
+  }
+
+  // { u64                   nr;
+  //   { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+  if (sample_fields & PERF_SAMPLE_BRANCH_STACK) {
+    array = ReadBranchStack(array, swap_bytes, sample);
+  }
+
+  // { u64                   abi,
+  //   u64                   regs[nr];  } && PERF_SAMPLE_REGS_USER
+  if (sample_fields & PERF_SAMPLE_REGS_USER) {
+    uint64_t abi = MaybeSwap(*array++, swap_bytes);
+    if (abi != 0) {
+      assert(n_attrs == 1);
+      uint64_t reg_mask = attr0.sample_regs_user;
+      size_t bit_nr = 0;
+      for (size_t i = 0; i < 64; ++i) {
+        if ((reg_mask >> i) & 1) {
+          bit_nr++;
+        }
+      }
+      array += bit_nr;
+    }
+  }
+
+  // { u64                   size,
+  //   u64                   regs[nr];  } && PERF_SAMPLE_STACK_USER
+  if (sample_fields & PERF_SAMPLE_STACK_USER) {
+    uint64_t size = MaybeSwap(*array++, swap_bytes);
+    if (size != 0) {
+      array += (size / sizeof(uint64_t));
+      array += 1;  // for dyn_size
+    }
+  }
+
+  static const u64 kUnimplementedSampleFields =
+      PERF_SAMPLE_WEIGHT     |
+      PERF_SAMPLE_DATA_SRC   |
+      PERF_SAMPLE_TRANSACTION;
+
+  if (sample_fields & kUnimplementedSampleFields) {
+    LOG(WARNING) << "Unimplemented sample fields 0x"
+                 << std::hex << (sample_fields & kUnimplementedSampleFields);
+  }
+
+  if (sample_fields & ~(PERF_SAMPLE_MAX-1)) {
+    LOG(WARNING) << "Unrecognized sample fields 0x"
+                 << std::hex << (sample_fields & ~(PERF_SAMPLE_MAX-1));
+  }
+
+  return (array - initial_array_ptr) * sizeof(uint64_t);
+}
+
+size_t WritePerfSampleToData(const perf_event_type event_type,
+                             const struct perf_sample& sample,
+                             const uint64_t sample_fields,
+                             const uint64_t read_format,
+                             uint64_t* array) {
+  const uint64_t* initial_array_ptr = array;
+
+  union {
+    uint32_t val32[sizeof(uint64_t) / sizeof(uint32_t)];
+    uint64_t val64;
+  };
+
+  // See notes at the top of ReadPerfSampleFromData regarding the structure
+  // of PERF_RECORD_SAMPLE, sample_id, and PERF_SAMPLE_IDENTIFIER, as they
+  // all apply here as well.
+
+  // PERF_SAMPLE_IDENTIFIER is in a different location depending on
+  // if this is a SAMPLE event or the sample_id of another event.
+  if (event_type == PERF_RECORD_SAMPLE) {
+    // { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
+    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
+      *array++ = sample.id;
+    }
+  }
+
+  // { u64                   ip;       } && PERF_SAMPLE_IP
+  if (sample_fields & PERF_SAMPLE_IP) {
+    *array++ = sample.ip;
+  }
+
+  // { u32                   pid, tid; } && PERF_SAMPLE_TID
+  if (sample_fields & PERF_SAMPLE_TID) {
+    val32[0] = sample.pid;
+    val32[1] = sample.tid;
+    *array++ = val64;
+  }
+
+  // { u64                   time;     } && PERF_SAMPLE_TIME
+  if (sample_fields & PERF_SAMPLE_TIME) {
+    *array++ = sample.time;
+  }
+
+  // { u64                   addr;     } && PERF_SAMPLE_ADDR
+  if (sample_fields & PERF_SAMPLE_ADDR) {
+    *array++ = sample.addr;
+  }
+
+  // { u64                   id;       } && PERF_SAMPLE_ID
+  if (sample_fields & PERF_SAMPLE_ID) {
+    *array++ = sample.id;
+  }
+
+  // { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
+  if (sample_fields & PERF_SAMPLE_STREAM_ID) {
+    *array++ = sample.stream_id;
+  }
+
+  // { u32                   cpu, res; } && PERF_SAMPLE_CPU
+  if (sample_fields & PERF_SAMPLE_CPU) {
+    val32[0] = sample.cpu;
+    // val32[1] = sample.res;  // not implemented?
+    val32[1] = 0;
+    *array++ = val64;
+  }
+
+  // This is the location of PERF_SAMPLE_IDENTIFIER in struct sample_id.
+  if (event_type != PERF_RECORD_SAMPLE) {
+    // { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
+    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
+      *array++ = sample.id;
+    }
+  }
+
+  //
+  // The remaining fields are only in PERF_RECORD_SAMPLE
+  //
+
+  // { u64                   period;   } && PERF_SAMPLE_PERIOD
+  if (sample_fields & PERF_SAMPLE_PERIOD) {
+    *array++ = sample.period;
+  }
+
+  // { struct read_format    values;   } && PERF_SAMPLE_READ
+  if (sample_fields & PERF_SAMPLE_READ) {
+    // TODO(cwp-team): support grouped read info.
+    if (read_format & PERF_FORMAT_GROUP)
+      return 0;
+    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+      *array++ = sample.read.time_enabled;
+    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+      *array++ = sample.read.time_running;
+    if (read_format & PERF_FORMAT_ID)
+      *array++ = sample.read.one.id;
+  }
+
+  // { u64                   nr,
+  //   u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+  if (sample_fields & PERF_SAMPLE_CALLCHAIN) {
+    if (!sample.callchain) {
+      LOG(ERROR) << "Expecting callchain data, but none was found.";
+    } else {
+      *array++ = sample.callchain->nr;
+      for (size_t i = 0; i < sample.callchain->nr; ++i)
+        *array++ = sample.callchain->ips[i];
+    }
+  }
+
+  // { u32                   size;
+  //   char                  data[size];}&& PERF_SAMPLE_RAW
+  if (sample_fields & PERF_SAMPLE_RAW) {
+    uint32_t* ptr = reinterpret_cast<uint32_t*>(array);
+    *ptr++ = sample.raw_size;
+    memcpy(ptr, sample.raw_data, sample.raw_size);
+
+    // Update the data write pointer after aligning to the next 64 bits.
+    int num_bytes = AlignSize(sizeof(sample.raw_size) + sample.raw_size,
+                              sizeof(uint64_t));
+    array += num_bytes / sizeof(uint64_t);
+  }
+
+  // { u64                   nr;
+  //   { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+  if (sample_fields & PERF_SAMPLE_BRANCH_STACK) {
+    if (!sample.branch_stack) {
+      LOG(ERROR) << "Expecting branch stack data, but none was found.";
+    } else {
+      *array++ = sample.branch_stack->nr;
+      for (size_t i = 0; i < sample.branch_stack->nr; ++i) {
+        *array++ = sample.branch_stack->entries[i].from;
+        *array++ = sample.branch_stack->entries[i].to;
+        memcpy(array++, &sample.branch_stack->entries[i].flags,
+               sizeof(uint64_t));
+      }
+    }
+  }
+
+  //
+  // Unsupported sample types.
+  //
+  CHECK(!(sample_fields & (PERF_SAMPLE_STACK_USER | PERF_SAMPLE_REGS_USER)));
+
+  return (array - initial_array_ptr) * sizeof(uint64_t);
+}
+
+}  // namespace
+
+PerfReader::~PerfReader() {
+  // Free allocated memory.
+  for (size_t i = 0; i < build_id_events_.size(); ++i)
+    if (build_id_events_[i])
+      free(build_id_events_[i]);
+}
+
+void PerfReader::PerfizeBuildIDString(string* build_id) {
+  build_id->resize(kBuildIDStringLength, '0');
+}
+
+void PerfReader::UnperfizeBuildIDString(string* build_id) {
+  const size_t kPaddingSize = 8;
+  const string kBuildIDPadding = string(kPaddingSize, '0');
+
+  // Remove kBuildIDPadding from the end of build_id until we cannot remove any
+  // more, or removing more would cause the build id to be empty.
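+  // For example, "deadbeef" followed by any whole number of "00000000" blocks
+  // is reduced back to "deadbeef".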
+  while (build_id->size() > kPaddingSize &&
+         build_id->substr(build_id->size() - kPaddingSize) == kBuildIDPadding) {
+    build_id->resize(build_id->size() - kPaddingSize);
+  }
+}
+
+bool PerfReader::ReadFile(const string& filename) {
+  std::vector<char> data;
+  if (!ReadFileToData(filename, &data))
+    return false;
+  return ReadFromVector(data);
+}
+
+bool PerfReader::ReadFromVector(const std::vector<char>& data) {
+  return ReadFromPointer(&data[0], data.size());
+}
+
+bool PerfReader::ReadFromString(const string& str) {
+  return ReadFromPointer(str.c_str(), str.size());
+}
+
+bool PerfReader::ReadFromPointer(const char* perf_data, size_t size) {
+  const ConstBufferWithSize data = { perf_data, size };
+
+  if (data.size == 0)
+    return false;
+  if (!ReadHeader(data))
+    return false;
+
+  // Check if it is normal perf data.
+  if (header_.size == sizeof(header_)) {
+    DLOG(INFO) << "Perf data is in normal format.";
+    metadata_mask_ = header_.adds_features[0];
+    return (ReadAttrs(data) && ReadEventTypes(data) && ReadData(data)
+            && ReadMetadata(data));
+  }
+
+  // Otherwise it is piped data.
+  LOG(ERROR) << "Internal error: no support for piped data";
+  return false;
+}
+
+bool PerfReader::Localize(
+    const std::map<string, string>& build_ids_to_filenames) {
+  std::map<string, string> perfized_build_ids_to_filenames;
+  std::map<string, string>::const_iterator it;
+  for (it = build_ids_to_filenames.begin();
+       it != build_ids_to_filenames.end();
+       ++it) {
+    string build_id = it->first;
+    PerfizeBuildIDString(&build_id);
+    perfized_build_ids_to_filenames[build_id] = it->second;
+  }
+
+  std::map<string, string> filename_map;
+  for (size_t i = 0; i < build_id_events_.size(); ++i) {
+    build_id_event* event = build_id_events_[i];
+    string build_id = HexToString(event->build_id, kBuildIDArraySize);
+    if (perfized_build_ids_to_filenames.find(build_id) ==
+        perfized_build_ids_to_filenames.end()) {
+      continue;
+    }
+
+    string new_name = perfized_build_ids_to_filenames.at(build_id);
+    filename_map[string(event->filename)] = new_name;
+    build_id_event* new_event = CreateOrUpdateBuildID("", new_name, 0, event);
+    CHECK(new_event);
+    build_id_events_[i] = new_event;
+  }
+
+  LocalizeUsingFilenames(filename_map);
+  return true;
+}
+
+bool PerfReader::LocalizeUsingFilenames(
+    const std::map<string, string>& filename_map) {
+  LocalizeMMapFilenames(filename_map);
+  for (size_t i = 0; i < build_id_events_.size(); ++i) {
+    build_id_event* event = build_id_events_[i];
+    string old_name = event->filename;
+
+    if (filename_map.find(event->filename) != filename_map.end()) {
+      const string& new_name = filename_map.at(old_name);
+      build_id_event* new_event = CreateOrUpdateBuildID("", new_name, 0, event);
+      CHECK(new_event);
+      build_id_events_[i] = new_event;
+    }
+  }
+  return true;
+}
+
+void PerfReader::GetFilenames(std::vector<string>* filenames) const {
+  std::set<string> filename_set;
+  GetFilenamesAsSet(&filename_set);
+  filenames->clear();
+  filenames->insert(filenames->begin(), filename_set.begin(),
+                    filename_set.end());
+}
+
+void PerfReader::GetFilenamesAsSet(std::set<string>* filenames) const {
+  filenames->clear();
+  for (size_t i = 0; i < events_.size(); ++i) {
+    const event_t& event = *events_[i];
+    if (event.header.type == PERF_RECORD_MMAP)
+      filenames->insert(event.mmap.filename);
+    if (event.header.type == PERF_RECORD_MMAP2)
+      filenames->insert(event.mmap2.filename);
+  }
+}
+
+void PerfReader::GetFilenamesToBuildIDs(
+    std::map<string, string>* filenames_to_build_ids) const {
+  filenames_to_build_ids->clear();
+  for (size_t i = 0; i < build_id_events_.size(); ++i) {
+    const build_id_event& event = *build_id_events_[i];
+    string build_id = HexToString(event.build_id, kBuildIDArraySize);
+    (*filenames_to_build_ids)[event.filename] = build_id;
+  }
+}
+
+bool PerfReader::IsSupportedEventType(uint32_t type) {
+  switch (type) {
+  case PERF_RECORD_SAMPLE:
+  case PERF_RECORD_MMAP:
+  case PERF_RECORD_MMAP2:
+  case PERF_RECORD_FORK:
+  case PERF_RECORD_EXIT:
+  case PERF_RECORD_COMM:
+  case PERF_RECORD_LOST:
+  case PERF_RECORD_THROTTLE:
+  case PERF_RECORD_UNTHROTTLE:
+  case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
+  case SIMPLE_PERF_RECORD_DSO:
+  case SIMPLE_PERF_RECORD_SYMBOL:
+  case SIMPLE_PERF_RECORD_SPLIT:
+  case SIMPLE_PERF_RECORD_SPLIT_END:
+    return true;
+  case PERF_RECORD_READ:
+  case PERF_RECORD_MAX:
+    return false;
+  default:
+    LOG(FATAL) << "Unknown event type " << type;
+    return false;
+  }
+}
+
+bool PerfReader::ReadPerfSampleInfo(const event_t& event,
+                                    struct perf_sample* sample) const {
+  CHECK(sample);
+
+  if (!IsSupportedEventType(event.header.type)) {
+    LOG(ERROR) << "Unsupported event type " << event.header.type;
+    return false;
+  }
+
+  // We want to completely ignore these records
+  if (event.header.type == SIMPLE_PERF_RECORD_KERNEL_SYMBOL ||
+      event.header.type == SIMPLE_PERF_RECORD_DSO ||
+      event.header.type == SIMPLE_PERF_RECORD_SYMBOL ||
+      event.header.type == SIMPLE_PERF_RECORD_SPLIT ||
+      event.header.type == SIMPLE_PERF_RECORD_SPLIT_END)
+    return true;
+
+  uint64_t sample_format = GetSampleFieldsForEventType(event.header.type,
+                                                       sample_type_);
+  uint64_t offset = GetPerfSampleDataOffset(event);
+  size_t size_read = ReadPerfSampleFromData(
+      static_cast<perf_event_type>(event.header.type),
+      reinterpret_cast<const uint64_t*>(&event) + offset / sizeof(uint64_t),
+      sample_format,
+      read_format_,
+      is_cross_endian_,
+      attrs_[0].attr,
+      attrs_.size(),
+      sample);
+
+  size_t expected_size = event.header.size - offset;
+  if (size_read != expected_size) {
+    LOG(ERROR) << "Read " << size_read << " bytes, expected "
+               << expected_size << " bytes.";
+  }
+
+  return (size_read == expected_size);
+}
+
+bool PerfReader::WritePerfSampleInfo(const perf_sample& sample,
+                                     event_t* event) const {
+  CHECK(event);
+
+  if (!IsSupportedEventType(event->header.type)) {
+    LOG(ERROR) << "Unsupported event type " << event->header.type;
+    return false;
+  }
+
+  uint64_t sample_format = GetSampleFieldsForEventType(event->header.type,
+                                                       sample_type_);
+  uint64_t offset = GetPerfSampleDataOffset(*event);
+
+  size_t expected_size = event->header.size - offset;
+  memset(reinterpret_cast<uint8_t*>(event) + offset, 0, expected_size);
+  size_t size_written = WritePerfSampleToData(
+      static_cast<perf_event_type>(event->header.type),
+      sample,
+      sample_format,
+      read_format_,
+      reinterpret_cast<uint64_t*>(event) + offset / sizeof(uint64_t));
+  if (size_written != expected_size) {
+    LOG(ERROR) << "Wrote " << size_written << " bytes, expected "
+               << expected_size << " bytes.";
+  }
+
+  return (size_written == expected_size);
+}
+
+bool PerfReader::ReadHeader(const ConstBufferWithSize& data) {
+  CheckNoEventHeaderPadding();
+  size_t offset = 0;
+  if (!ReadDataFromBuffer(data, sizeof(piped_header_), "header magic",
+                          &offset, &piped_header_)) {
+    return false;
+  }
+  if (piped_header_.magic != kPerfMagic &&
+      piped_header_.magic != bswap_64(kPerfMagic)) {
+    LOG(ERROR) << "Read wrong magic. Expected: 0x" << std::hex << kPerfMagic
+               << " or 0x" << std::hex << bswap_64(kPerfMagic)
+               << " Got: 0x" << std::hex << piped_header_.magic;
+    return false;
+  }
+  is_cross_endian_ = (piped_header_.magic != kPerfMagic);
+  if (is_cross_endian_)
+    ByteSwap(&piped_header_.size);
+
+  // Header can be a piped header.
+  if (piped_header_.size == sizeof(piped_header_))
+    return true;
+
+  // Re-read full header
+  offset = 0;
+  if (!ReadDataFromBuffer(data, sizeof(header_), "header data",
+                          &offset, &header_)) {
+    return false;
+  }
+  if (is_cross_endian_)
+    ByteSwap(&header_.size);
+
+  DLOG(INFO) << "event_types.size: " << header_.event_types.size;
+  DLOG(INFO) << "event_types.offset: " << header_.event_types.offset;
+
+  return true;
+}
+
+bool PerfReader::ReadAttrs(const ConstBufferWithSize& data) {
+  size_t num_attrs = header_.attrs.size / header_.attr_size;
+  size_t offset = header_.attrs.offset;
+  for (size_t i = 0; i < num_attrs; i++) {
+    if (!ReadAttr(data, &offset))
+      return false;
+  }
+  return true;
+}
+
+bool PerfReader::ReadAttr(const ConstBufferWithSize& data, size_t* offset) {
+  PerfFileAttr attr;
+  if (!ReadEventAttr(data, offset, &attr.attr))
+    return false;
+
+  perf_file_section ids;
+  if (!ReadDataFromBuffer(data, sizeof(ids), "ID section info", offset, &ids))
+    return false;
+  if (is_cross_endian_) {
+    ByteSwap(&ids.offset);
+    ByteSwap(&ids.size);
+  }
+
+  size_t num_ids = ids.size / sizeof(decltype(attr.ids)::value_type);
+  // Convert the offset from u64 to size_t.
+  size_t ids_offset = ids.offset;
+  if (!ReadUniqueIDs(data, num_ids, &ids_offset, &attr.ids))
+    return false;
+  attrs_.push_back(attr);
+  return true;
+}
+
+u32 PerfReader::ReadPerfEventAttrSize(const ConstBufferWithSize& data,
+                                      size_t attr_offset) {
+  static_assert(std::is_same<decltype(perf_event_attr::size), u32>::value,
+                "ReadPerfEventAttrSize return type should match "
+                "perf_event_attr.size");
+  u32 attr_size;
+  size_t attr_size_offset = attr_offset + offsetof(perf_event_attr, size);
+  if (!ReadDataFromBuffer(data, sizeof(perf_event_attr::size),
+                          "attr.size", &attr_size_offset, &attr_size)) {
+    return kuint32max;
+  }
+  return MaybeSwap(attr_size, is_cross_endian_);
+}
+
+bool PerfReader::ReadEventAttr(const ConstBufferWithSize& data, size_t* offset,
+                               perf_event_attr* attr) {
+  CheckNoPerfEventAttrPadding();
+
+  std::memset(attr, 0, sizeof(*attr));
+  //*attr = {0};
+
+  // Read just the size field first.
+  u32 attr_size = ReadPerfEventAttrSize(data, *offset);
+  if (attr_size == kuint32max) {
+    return false;
+  }
+
+  // Now read the rest of the struct.
+  if (!ReadDataFromBuffer(data, attr_size, "attribute", offset,
+                          reinterpret_cast<char*>(attr))) {
+    return false;
+  }
+
+  if (is_cross_endian_) {
+    // Depending on attr->size, some of these might not have actually been
+    // read. This is okay: they are zero.
+    ByteSwap(&attr->type);
+    ByteSwap(&attr->size);
+    ByteSwap(&attr->config);
+    ByteSwap(&attr->sample_period);
+    ByteSwap(&attr->sample_type);
+    ByteSwap(&attr->read_format);
+
+    // NB: This will also reverse precise_ip : 2 as if it were two fields:
+    auto *const bitfield_start = &attr->read_format + 1;
+    SwapBitfieldOfBits(reinterpret_cast<u8*>(bitfield_start),
+                       sizeof(u64));
+    // ... So swap it back:
+    const auto tmp = attr->precise_ip;
+    attr->precise_ip = (tmp & 0x2) >> 1 | (tmp & 0x1) << 1;
+
+    ByteSwap(&attr->wakeup_events);  // union with wakeup_watermark
+    ByteSwap(&attr->bp_type);
+    ByteSwap(&attr->bp_addr);        // union with config1
+    ByteSwap(&attr->bp_len);         // union with config2
+    ByteSwap(&attr->branch_sample_type);
+    ByteSwap(&attr->sample_regs_user);
+    ByteSwap(&attr->sample_stack_user);
+  }
+
+  CHECK_EQ(attr_size, attr->size);
+  // The actual perf_event_attr data size might be different from the size of
+  // the struct definition.  Now that the data has been read, normalize |size|
+  // to the in-memory struct size.
+  attr->size = sizeof(*attr);
+
+  // Assign sample type if it hasn't been assigned, otherwise make sure all
+  // subsequent attributes have the same sample type bits set.
+  if (sample_type_ == 0) {
+    sample_type_ = attr->sample_type;
+  } else {
+    CHECK_EQ(sample_type_, attr->sample_type)
+        << "Event type sample format does not match sample format of other "
+        << "event type.";
+  }
+
+  if (read_format_ == 0) {
+    read_format_ = attr->read_format;
+  } else {
+    CHECK_EQ(read_format_, attr->read_format)
+        << "Event type read format does not match read format of other event "
+        << "types.";
+  }
+
+  return true;
+}
+
+bool PerfReader::ReadUniqueIDs(const ConstBufferWithSize& data, size_t num_ids,
+                               size_t* offset, std::vector<u64>* ids) {
+  ids->resize(num_ids);
+  for (size_t j = 0; j < num_ids; j++) {
+    if (!ReadDataFromBuffer(data, sizeof(ids->at(j)), "ID", offset,
+                            &ids->at(j))) {
+      return false;
+    }
+    if (is_cross_endian_)
+      ByteSwap(&ids->at(j));
+  }
+  return true;
+}
+
+bool PerfReader::ReadEventTypes(const ConstBufferWithSize& data) {
+  size_t num_event_types = header_.event_types.size /
+      sizeof(struct perf_trace_event_type);
+  CHECK_EQ(sizeof(perf_trace_event_type) * num_event_types,
+           header_.event_types.size);
+  size_t offset = header_.event_types.offset;
+  for (size_t i = 0; i < num_event_types; ++i) {
+    if (!ReadEventType(data, &offset))
+      return false;
+  }
+  return true;
+}
+
+bool PerfReader::ReadEventType(const ConstBufferWithSize& data,
+                               size_t* offset) {
+  CheckNoEventTypePadding();
+  perf_trace_event_type type;
+  memset(&type, 0, sizeof(type));
+  if (!ReadDataFromBuffer(data, sizeof(type.event_id), "event id",
+                          offset, &type.event_id)) {
+    return false;
+  }
+  const char* event_name = reinterpret_cast<const char*>(data.ptr + *offset);
+  CHECK_GT(snprintf(type.name, sizeof(type.name), "%s", event_name), 0);
+  *offset += sizeof(type.name);
+  event_types_.push_back(type);
+  return true;
+}
+
+bool PerfReader::ReadData(const ConstBufferWithSize& data) {
+  u64 data_remaining_bytes = header_.data.size;
+  size_t offset = header_.data.offset;
+  while (data_remaining_bytes != 0) {
+    if (data.size < offset) {
+      LOG(ERROR) << "Not enough data to read a perf event.";
+      return false;
+    }
+
+    const event_t* event = reinterpret_cast<const event_t*>(data.ptr + offset);
+    if (!ReadPerfEventBlock(*event))
+      return false;
+    data_remaining_bytes -= event->header.size;
+    offset += event->header.size;
+  }
+
+  DLOG(INFO) << "Number of events stored: "<< events_.size();
+  return true;
+}
+
+bool PerfReader::ReadMetadata(const ConstBufferWithSize& data) {
+  size_t offset = header_.data.offset + header_.data.size;
+
+  for (u32 type = HEADER_FIRST_FEATURE; type != HEADER_LAST_FEATURE; ++type) {
+    if ((metadata_mask_ & (1 << type)) == 0)
+      continue;
+
+    if (data.size < offset) {
+      LOG(ERROR) << "Not enough data to read offset and size of metadata.";
+      return false;
+    }
+
+    u64 metadata_offset, metadata_size;
+    if (!ReadDataFromBuffer(data, sizeof(metadata_offset), "metadata offset",
+                            &offset, &metadata_offset) ||
+        !ReadDataFromBuffer(data, sizeof(metadata_size), "metadata size",
+                            &offset, &metadata_size)) {
+      return false;
+    }
+
+    if (data.size < metadata_offset + metadata_size) {
+      LOG(ERROR) << "Not enough data to read metadata.";
+      return false;
+    }
+
+    switch (type) {
+    case HEADER_TRACING_DATA:
+      if (!ReadTracingMetadata(data, metadata_offset, metadata_size)) {
+        return false;
+      }
+      break;
+    case HEADER_BUILD_ID:
+      if (!ReadBuildIDMetadata(data, type, metadata_offset, metadata_size))
+        return false;
+      break;
+    case HEADER_HOSTNAME:
+    case HEADER_OSRELEASE:
+    case HEADER_VERSION:
+    case HEADER_ARCH:
+    case HEADER_CPUDESC:
+    case HEADER_CPUID:
+    case HEADER_CMDLINE:
+      if (!ReadStringMetadata(data, type, metadata_offset, metadata_size))
+        return false;
+      break;
+    case HEADER_NRCPUS:
+      if (!ReadUint32Metadata(data, type, metadata_offset, metadata_size))
+        return false;
+      break;
+    case HEADER_TOTAL_MEM:
+      if (!ReadUint64Metadata(data, type, metadata_offset, metadata_size))
+        return false;
+      break;
+    case HEADER_EVENT_DESC:
+      break;
+    case HEADER_CPU_TOPOLOGY:
+      if (!ReadCPUTopologyMetadata(data, type, metadata_offset, metadata_size))
+        return false;
+      break;
+    case HEADER_NUMA_TOPOLOGY:
+      if (!ReadNUMATopologyMetadata(data, type, metadata_offset, metadata_size))
+        return false;
+      break;
+    case HEADER_PMU_MAPPINGS:
+      // ignore for now
+      continue;
+      break;
+    case HEADER_BRANCH_STACK:
+      continue;
+    default:
+      LOG(INFO) << "Unsupported metadata type: " << type;
+      break;
+    }
+  }
+
+  // Event type events are optional in some newer versions of perf. They
+  // contain the same information that is already in |attrs_|. Make sure the
+  // number of event types matches the number of attrs, but only if there are
+  // event type events present.
+  if (event_types_.size() > 0) {
+    if (event_types_.size() != attrs_.size()) {
+      LOG(ERROR) << "Mismatch between number of event type events and attr "
+                 << "events: " << event_types_.size() << " vs "
+                 << attrs_.size();
+      return false;
+    }
+    metadata_mask_ |= (1 << HEADER_EVENT_DESC);
+  }
+  return true;
+}
+
+bool PerfReader::ReadBuildIDMetadata(const ConstBufferWithSize& data, u32 /*type*/,
+                                     size_t offset, size_t size) {
+  CheckNoBuildIDEventPadding();
+  while (size > 0) {
+    // Make sure there is enough data for everything but the filename.
+    if (data.size < offset + sizeof(build_id_event) / sizeof(*data.ptr)) {
+      LOG(ERROR) << "Not enough bytes to read build id event";
+      return false;
+    }
+
+    const build_id_event* temp_ptr =
+        reinterpret_cast<const build_id_event*>(data.ptr + offset);
+    u16 event_size = temp_ptr->header.size;
+    if (is_cross_endian_)
+      ByteSwap(&event_size);
+
+    // Make sure there is enough data for the rest of the event.
+    if (data.size < offset + event_size / sizeof(*data.ptr)) {
+      LOG(ERROR) << "Not enough bytes to read build id event";
+      return false;
+    }
+
+    // Allocate memory for the event and copy over the bytes.
+    build_id_event* event = CallocMemoryForBuildID(event_size);
+    if (!ReadDataFromBuffer(data, event_size, "build id event",
+                            &offset, event)) {
+      return false;
+    }
+    if (is_cross_endian_) {
+      ByteSwap(&event->header.type);
+      ByteSwap(&event->header.misc);
+      ByteSwap(&event->header.size);
+      ByteSwap(&event->pid);
+    }
+    size -= event_size;
+
+    // Perf tends to use more space than necessary, so fix the size.
+    event->header.size =
+        sizeof(*event) + GetUint64AlignedStringLength(event->filename);
+    build_id_events_.push_back(event);
+  }
+
+  return true;
+}
+
+bool PerfReader::ReadStringMetadata(const ConstBufferWithSize& data, u32 type,
+                                    size_t offset, size_t size) {
+  PerfStringMetadata str_data;
+  str_data.type = type;
+
+  size_t start_offset = offset;
+  // Skip the number of string data if it is present.
+  if (NeedsNumberOfStringData(type))
+    offset += sizeof(num_string_data_type) / sizeof(*data.ptr);
+
+  while ((offset - start_offset) < size) {
+    CStringWithLength single_string;
+    if (!ReadStringFromBuffer(data, is_cross_endian_, &offset, &single_string))
+      return false;
+    str_data.data.push_back(single_string);
+  }
+
+  string_metadata_.push_back(str_data);
+  return true;
+}
+
+bool PerfReader::ReadUint32Metadata(const ConstBufferWithSize& data, u32 type,
+                                    size_t offset, size_t size) {
+  PerfUint32Metadata uint32_data;
+  uint32_data.type = type;
+
+  size_t start_offset = offset;
+  while (size > offset - start_offset) {
+    uint32_t item;
+    if (!ReadDataFromBuffer(data, sizeof(item), "uint32_t data", &offset,
+                            &item))
+      return false;
+
+    if (is_cross_endian_)
+      ByteSwap(&item);
+
+    uint32_data.data.push_back(item);
+  }
+
+  uint32_metadata_.push_back(uint32_data);
+  return true;
+}
+
+bool PerfReader::ReadUint64Metadata(const ConstBufferWithSize& data, u32 type,
+                                    size_t offset, size_t size) {
+  PerfUint64Metadata uint64_data;
+  uint64_data.type = type;
+
+  size_t start_offset = offset;
+  while (size > offset - start_offset) {
+    uint64_t item;
+    if (!ReadDataFromBuffer(data, sizeof(item), "uint64_t data", &offset,
+                            &item))
+      return false;
+
+    if (is_cross_endian_)
+      ByteSwap(&item);
+
+    uint64_data.data.push_back(item);
+  }
+
+  uint64_metadata_.push_back(uint64_data);
+  return true;
+}
+
+bool PerfReader::ReadCPUTopologyMetadata(
+    const ConstBufferWithSize& data, u32 /*type*/, size_t offset, size_t /*size*/) {
+  num_siblings_type num_core_siblings;
+  if (!ReadDataFromBuffer(data, sizeof(num_core_siblings), "num cores",
+                          &offset, &num_core_siblings)) {
+    return false;
+  }
+  if (is_cross_endian_)
+    ByteSwap(&num_core_siblings);
+
+  cpu_topology_.core_siblings.resize(num_core_siblings);
+  for (size_t i = 0; i < num_core_siblings; ++i) {
+    if (!ReadStringFromBuffer(data, is_cross_endian_, &offset,
+                              &cpu_topology_.core_siblings[i])) {
+      return false;
+    }
+  }
+
+  num_siblings_type num_thread_siblings;
+  if (!ReadDataFromBuffer(data, sizeof(num_thread_siblings), "num threads",
+                          &offset, &num_thread_siblings)) {
+    return false;
+  }
+  if (is_cross_endian_)
+    ByteSwap(&num_thread_siblings);
+
+  cpu_topology_.thread_siblings.resize(num_thread_siblings);
+  for (size_t i = 0; i < num_thread_siblings; ++i) {
+    if (!ReadStringFromBuffer(data, is_cross_endian_, &offset,
+                              &cpu_topology_.thread_siblings[i])) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool PerfReader::ReadNUMATopologyMetadata(
+    const ConstBufferWithSize& data, u32 /*type*/, size_t offset, size_t /*size*/) {
+  numa_topology_num_nodes_type num_nodes;
+  if (!ReadDataFromBuffer(data, sizeof(num_nodes), "num nodes",
+                          &offset, &num_nodes)) {
+    return false;
+  }
+  if (is_cross_endian_)
+    ByteSwap(&num_nodes);
+
+  for (size_t i = 0; i < num_nodes; ++i) {
+    PerfNodeTopologyMetadata node;
+    if (!ReadDataFromBuffer(data, sizeof(node.id), "node id",
+                            &offset, &node.id) ||
+        !ReadDataFromBuffer(data, sizeof(node.total_memory),
+                            "node total memory", &offset,
+                            &node.total_memory) ||
+        !ReadDataFromBuffer(data, sizeof(node.free_memory),
+                            "node free memory", &offset, &node.free_memory) ||
+        !ReadStringFromBuffer(data, is_cross_endian_, &offset,
+                              &node.cpu_list)) {
+      return false;
+    }
+    if (is_cross_endian_) {
+      ByteSwap(&node.id);
+      ByteSwap(&node.total_memory);
+      ByteSwap(&node.free_memory);
+    }
+    numa_topology_.push_back(node);
+  }
+  return true;
+}
+
+bool PerfReader::ReadTracingMetadata(
+    const ConstBufferWithSize& data, size_t offset, size_t size) {
+  size_t tracing_data_offset = offset;
+  tracing_data_.resize(size);
+  return ReadDataFromBuffer(data, tracing_data_.size(), "tracing_data",
+                            &tracing_data_offset, tracing_data_.data());
+}
+
+bool PerfReader::ReadTracingMetadataEvent(
+    const ConstBufferWithSize& data, size_t offset) {
+  // TRACING_DATA's header.size is a lie. It is the size of only the event
+  // struct. The size of the data is in the event struct, and followed
+  // immediately by the tracing header data.
+
+  // Make a copy of the event (but not the tracing data)
+  tracing_data_event tracing_event =
+      *reinterpret_cast<const tracing_data_event*>(data.ptr + offset);
+
+  if (is_cross_endian_) {
+    ByteSwap(&tracing_event.header.type);
+    ByteSwap(&tracing_event.header.misc);
+    ByteSwap(&tracing_event.header.size);
+    ByteSwap(&tracing_event.size);
+  }
+
+  return ReadTracingMetadata(data, offset + tracing_event.header.size,
+                             tracing_event.size);
+}
+
+bool PerfReader::ReadAttrEventBlock(const ConstBufferWithSize& data,
+                                    size_t offset, size_t size) {
+  const size_t initial_offset = offset;
+  PerfFileAttr attr;
+  if (!ReadEventAttr(data, &offset, &attr.attr))
+    return false;
+
+  // attr.attr.size has been upgraded to the current size of perf_event_attr.
+  const size_t actual_attr_size = offset - initial_offset;
+
+  const size_t num_ids =
+      (size - actual_attr_size) / sizeof(decltype(attr.ids)::value_type);
+  if (!ReadUniqueIDs(data, num_ids, &offset, &attr.ids))
+    return false;
+
+  // Event types are found many times in the perf data file.
+  // Only add this event type if it is not already present.
+  for (size_t i = 0; i < attrs_.size(); ++i) {
+    if (attrs_[i].ids[0] == attr.ids[0])
+      return true;
+  }
+  attrs_.push_back(attr);
+  return true;
+}
+
+// When this method is called, |event| is a reference to the bytes in the data
+// vector that contains the entire perf.data file.  As a result, we need to be
+// careful to only copy event.header.size bytes.
+// In particular, something like
+// event_t event_copy = event;
+// would be bad, because it would read past the end of the event, and possibly
+// pass the end of the data vector as well.
+bool PerfReader::ReadPerfEventBlock(const event_t& event) {
+  u16 size = event.header.size;
+  if (is_cross_endian_)
+    ByteSwap(&size);
+
+  //
+  // Upstream linux perf limits the size of an event record to 2^16 bytes,
+  // however simpleperf includes extensions to support larger (2^32) record
+  // sizes via a split record scheme (the larger records are split up
+  // into chunks and then embedded into a series of SIMPLE_PERF_RECORD_SPLIT
+  // records, followed by a terminating SIMPLE_PERF_RECORD_SPLIT_END record).
+  // At the moment none of the larger records are of interest to perfprofd, so
+  // the main thing we're doing here is ignoring/bypassing them.
+  //
+  if (event.header.type == SIMPLE_PERF_RECORD_KERNEL_SYMBOL ||
+      event.header.type == SIMPLE_PERF_RECORD_DSO ||
+      event.header.type == SIMPLE_PERF_RECORD_SYMBOL ||
+      event.header.type == SIMPLE_PERF_RECORD_SPLIT ||
+      event.header.type == SIMPLE_PERF_RECORD_SPLIT_END)
+    size = sizeof(event_t);
+  else if (size > sizeof(event_t)) {
+    LOG(INFO) << "Data size: " << size << " sizeof(event_t): "
+              << sizeof(event_t);
+    return false;
+  }
+
+  // Copy only the part of the event that is needed.
+  malloced_unique_ptr<event_t> event_copy(CallocMemoryForEvent(size));
+  memcpy(event_copy.get(), &event, size);
+  if (is_cross_endian_) {
+    ByteSwap(&event_copy->header.type);
+    ByteSwap(&event_copy->header.misc);
+    ByteSwap(&event_copy->header.size);
+  }
+
+  uint32_t type = event_copy->header.type;
+  if (is_cross_endian_) {
+    switch (type) {
+    case PERF_RECORD_SAMPLE:
+      break;
+    case PERF_RECORD_MMAP:
+      ByteSwap(&event_copy->mmap.pid);
+      ByteSwap(&event_copy->mmap.tid);
+      ByteSwap(&event_copy->mmap.start);
+      ByteSwap(&event_copy->mmap.len);
+      ByteSwap(&event_copy->mmap.pgoff);
+      break;
+    case PERF_RECORD_MMAP2:
+      ByteSwap(&event_copy->mmap2.pid);
+      ByteSwap(&event_copy->mmap2.tid);
+      ByteSwap(&event_copy->mmap2.start);
+      ByteSwap(&event_copy->mmap2.len);
+      ByteSwap(&event_copy->mmap2.pgoff);
+      ByteSwap(&event_copy->mmap2.maj);
+      ByteSwap(&event_copy->mmap2.min);
+      ByteSwap(&event_copy->mmap2.ino);
+      ByteSwap(&event_copy->mmap2.ino_generation);
+      break;
+    case PERF_RECORD_FORK:
+    case PERF_RECORD_EXIT:
+      ByteSwap(&event_copy->fork.pid);
+      ByteSwap(&event_copy->fork.tid);
+      ByteSwap(&event_copy->fork.ppid);
+      ByteSwap(&event_copy->fork.ptid);
+      break;
+    case PERF_RECORD_COMM:
+      ByteSwap(&event_copy->comm.pid);
+      ByteSwap(&event_copy->comm.tid);
+      break;
+    case PERF_RECORD_LOST:
+      ByteSwap(&event_copy->lost.id);
+      ByteSwap(&event_copy->lost.lost);
+      break;
+    case PERF_RECORD_READ:
+      ByteSwap(&event_copy->read.pid);
+      ByteSwap(&event_copy->read.tid);
+      ByteSwap(&event_copy->read.value);
+      ByteSwap(&event_copy->read.time_enabled);
+      ByteSwap(&event_copy->read.time_running);
+      ByteSwap(&event_copy->read.id);
+      break;
+    case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
+      break;
+    default:
+      LOG(FATAL) << "Unknown event type: " << type;
+    }
+  }
+
+  events_.push_back(std::move(event_copy));
+
+  return true;
+}
+
+size_t PerfReader::GetNumMetadata() const {
+  // This is just the number of 1s in the binary representation of the metadata
+  // mask.  However, make sure to only use supported metadata, and don't include
+  // branch stack (since it doesn't have an entry in the metadata section).
+  uint64_t new_mask = metadata_mask_;
+  new_mask &= kSupportedMetadataMask & ~(1 << HEADER_BRANCH_STACK);
+  std::bitset<sizeof(new_mask) * CHAR_BIT> bits(new_mask);
+  return bits.count();
+}
+
+size_t PerfReader::GetEventDescMetadataSize() const {
+  size_t size = 0;
+  if (event_types_.empty()) {
+    return size;
+  }
+  if (metadata_mask_ & (1 << HEADER_EVENT_DESC)) {
+    if (event_types_.size() > 0 && event_types_.size() != attrs_.size()) {
+      LOG(ERROR) << "Mismatch between number of event type events and attr "
+                 << "events: " << event_types_.size() << " vs "
+                 << attrs_.size();
+      return size;
+    }
+    size += sizeof(event_desc_num_events) + sizeof(event_desc_attr_size);
+    CStringWithLength dummy;
+    for (size_t i = 0; i < attrs_.size(); ++i) {
+      size += sizeof(perf_event_attr) + sizeof(dummy.len);
+      size += sizeof(event_desc_num_unique_ids);
+      size += GetUint64AlignedStringLength(event_types_[i].name) * sizeof(char);
+      size += attrs_[i].ids.size() * sizeof(attrs_[i].ids[0]);
+    }
+  }
+  return size;
+}
+
+size_t PerfReader::GetBuildIDMetadataSize() const {
+  size_t size = 0;
+  for (size_t i = 0; i < build_id_events_.size(); ++i)
+    size += build_id_events_[i]->header.size;
+  return size;
+}
+
+size_t PerfReader::GetStringMetadataSize() const {
+  size_t size = 0;
+  for (size_t i = 0; i < string_metadata_.size(); ++i) {
+    const PerfStringMetadata& metadata = string_metadata_[i];
+    if (NeedsNumberOfStringData(metadata.type))
+      size += sizeof(num_string_data_type);
+
+    for (size_t j = 0; j < metadata.data.size(); ++j) {
+      const CStringWithLength& str = metadata.data[j];
+      size += sizeof(str.len) + (str.len * sizeof(char));
+    }
+  }
+  return size;
+}
+
+size_t PerfReader::GetUint32MetadataSize() const {
+  size_t size = 0;
+  for (size_t i = 0; i < uint32_metadata_.size(); ++i) {
+    const PerfUint32Metadata& metadata = uint32_metadata_[i];
+    size += metadata.data.size() * sizeof(metadata.data[0]);
+  }
+  return size;
+}
+
+size_t PerfReader::GetUint64MetadataSize() const {
+  size_t size = 0;
+  for (size_t i = 0; i < uint64_metadata_.size(); ++i) {
+    const PerfUint64Metadata& metadata = uint64_metadata_[i];
+    size += metadata.data.size() * sizeof(metadata.data[0]);
+  }
+  return size;
+}
+
+size_t PerfReader::GetCPUTopologyMetadataSize() const {
+  // Core siblings.
+  size_t size = sizeof(num_siblings_type);
+  for (size_t i = 0; i < cpu_topology_.core_siblings.size(); ++i) {
+    const CStringWithLength& str = cpu_topology_.core_siblings[i];
+    size += sizeof(str.len) + (str.len * sizeof(char));
+  }
+
+  // Thread siblings.
+  size += sizeof(num_siblings_type);
+  for (size_t i = 0; i < cpu_topology_.thread_siblings.size(); ++i) {
+    const CStringWithLength& str = cpu_topology_.thread_siblings[i];
+    size += sizeof(str.len) + (str.len * sizeof(char));
+  }
+
+  return size;
+}
+
+size_t PerfReader::GetNUMATopologyMetadataSize() const {
+  size_t size = sizeof(numa_topology_num_nodes_type);
+  for (size_t i = 0; i < numa_topology_.size(); ++i) {
+    const PerfNodeTopologyMetadata& node = numa_topology_[i];
+    size += sizeof(node.id);
+    size += sizeof(node.total_memory) + sizeof(node.free_memory);
+    size += sizeof(node.cpu_list.len) + node.cpu_list.len * sizeof(char);
+  }
+  return size;
+}
+
+bool PerfReader::NeedsNumberOfStringData(u32 type) const {
+  return type == HEADER_CMDLINE;
+}
+
+bool PerfReader::LocalizeMMapFilenames(
+    const std::map<string, string>& filename_map) {
+  // Search for mmap/mmap2 events for which the filename needs to be updated.
+  for (size_t i = 0; i < events_.size(); ++i) {
+    string filename;
+    size_t size_of_fixed_event_parts;
+    event_t* event = events_[i].get();
+    if (event->header.type == PERF_RECORD_MMAP) {
+      filename = string(event->mmap.filename);
+      size_of_fixed_event_parts =
+          sizeof(event->mmap) - sizeof(event->mmap.filename);
+    } else if (event->header.type == PERF_RECORD_MMAP2) {
+      filename = string(event->mmap2.filename);
+      size_of_fixed_event_parts =
+          sizeof(event->mmap2) - sizeof(event->mmap2.filename);
+    } else {
+      continue;
+    }
+
+    const auto it = filename_map.find(filename);
+    if (it == filename_map.end())  // not found
+      continue;
+
+    const string& new_filename = it->second;
+    size_t old_len = GetUint64AlignedStringLength(filename);
+    size_t new_len = GetUint64AlignedStringLength(new_filename);
+    size_t old_offset = GetPerfSampleDataOffset(*event);
+    size_t sample_size = event->header.size - old_offset;
+
+    int size_change = new_len - old_len;
+    size_t new_size = event->header.size + size_change;
+    size_t new_offset = old_offset + size_change;
+
+    if (size_change > 0) {
+      // Allocate memory for a new event.
+      event_t* old_event = event;
+      malloced_unique_ptr<event_t> new_event(CallocMemoryForEvent(new_size));
+
+      // Copy over everything except filename and sample info.
+      memcpy(new_event.get(), old_event, size_of_fixed_event_parts);
+
+      // Copy over the sample info to the correct location.
+      char* old_addr = reinterpret_cast<char*>(old_event);
+      char* new_addr = reinterpret_cast<char*>(new_event.get());
+      memcpy(new_addr + new_offset, old_addr + old_offset, sample_size);
+
+      events_[i] = std::move(new_event);
+      event = events_[i].get();
+    } else if (size_change < 0) {
+      // Move the perf sample data to its new location.
+      // Since source and dest could overlap, use memmove instead of memcpy.
+      char* start_addr = reinterpret_cast<char*>(event);
+      memmove(start_addr + new_offset, start_addr + old_offset, sample_size);
+    }
+
+    // Copy over the new filename and fix the size of the event.
+    char *event_filename = nullptr;
+    if (event->header.type == PERF_RECORD_MMAP) {
+      event_filename = event->mmap.filename;
+    } else if (event->header.type == PERF_RECORD_MMAP2) {
+      event_filename = event->mmap2.filename;
+    } else {
+      LOG(FATAL) << "Unexpected event type";  // Impossible
+    }
+    CHECK_GT(snprintf(event_filename, new_filename.size() + 1, "%s",
+                      new_filename.c_str()),
+             0);
+    event->header.size = new_size;
+  }
+
+  return true;
+}
+
+}  // namespace quipper
diff --git a/perfprofd/quipper/perf_reader.h b/perfprofd/quipper/perf_reader.h
new file mode 100644
index 0000000..8216372
--- /dev/null
+++ b/perfprofd/quipper/perf_reader.h
@@ -0,0 +1,296 @@
+// Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROMIUMOS_WIDE_PROFILING_PERF_READER_H_
+#define CHROMIUMOS_WIDE_PROFILING_PERF_READER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "base/macros.h"
+
+#include "perf_internals.h"
+#include "quipper_string.h"
+#include "perf_utils.h"
+
+namespace quipper {
+
+struct PerfFileAttr {
+  struct perf_event_attr attr;
+  std::vector<u64> ids;
+};
+
+// Based on code in tools/perf/util/header.c, the metadata are of the following
+// formats:
+
+// Based on kernel/perf_internals.h
+const size_t kBuildIDArraySize = 20;
+const size_t kBuildIDStringLength = kBuildIDArraySize * 2;
+
+struct CStringWithLength {
+  u32 len;
+  string str;
+};
+
+struct PerfStringMetadata {
+  u32 type;
+  std::vector<CStringWithLength> data;
+};
+
+struct PerfUint32Metadata {
+  u32 type;
+  std::vector<uint32_t> data;
+};
+
+struct PerfUint64Metadata {
+  u32 type;
+  std::vector<uint64_t> data;
+};
+
+typedef u32 num_siblings_type;
+
+struct PerfCPUTopologyMetadata {
+  std::vector<CStringWithLength> core_siblings;
+  std::vector<CStringWithLength> thread_siblings;
+};
+
+struct PerfNodeTopologyMetadata {
+  u32 id;
+  u64 total_memory;
+  u64 free_memory;
+  CStringWithLength cpu_list;
+};
+
+struct BufferWithSize;
+struct ConstBufferWithSize;
+
+class PerfReader {
+ public:
+  PerfReader() : sample_type_(0),
+                 read_format_(0),
+                 is_cross_endian_(0) {}
+  ~PerfReader();
+
+  // Makes |build_id| fit the perf format, by either truncating it or adding
+  // zeros to the end so that it has length kBuildIDStringLength.
+  static void PerfizeBuildIDString(string* build_id);
+
+  // Changes |build_id| to the best guess of what the build id was before going
+  // through perf.  Specifically, it keeps removing trailing sequences of four
+  // zero bytes (or eight '0' characters) until there are no more such
+  // sequences, or the build id would be empty if the process were repeated.
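+  //
+  // Illustrative example (derived from the descriptions above): with
+  // kBuildIDStringLength == 40, PerfizeBuildIDString turns "1234abcd"
+  // into "1234abcd" followed by 32 '0' characters, and
+  // UnperfizeBuildIDString strips those four trailing groups of eight
+  // '0' characters, recovering "1234abcd".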
+  static void UnperfizeBuildIDString(string* build_id);
+
+  bool ReadFile(const string& filename);
+  bool ReadFromVector(const std::vector<char>& data);
+  bool ReadFromString(const string& str);
+  bool ReadFromPointer(const char* perf_data, size_t size);
+
+  // TODO(rohinmshah): GetSize should not use RegenerateHeader (so that it can
+  // be const).  Ideally, RegenerateHeader would be deleted and instead of
+  // having out_header_ as an instance variable, it would be computed
+  // dynamically whenever needed.
+
+  // Returns the size in bytes that would be written by any of the methods that
+  // write the entire perf data file (WriteFile, WriteToPointer, etc).
+  size_t GetSize();
+
+  bool WriteFile(const string& filename);
+  bool WriteToVector(std::vector<char>* data);
+  bool WriteToString(string* str);
+  bool WriteToPointer(char* buffer, size_t size);
+
+  bool RegenerateHeader();
+
+  // Stores the mapping from filenames to build ids in build_id_events_.
+  // Returns true on success.
+  // Note: If |filenames_to_build_ids| contains a mapping for a filename for
+  // which there is already a build_id_event in build_id_events_, a duplicate
+  // build_id_event will be created, and the old build_id_event will NOT be
+  // deleted.
+  bool InjectBuildIDs(const std::map<string, string>& filenames_to_build_ids);
+
+  // Replaces existing filenames with filenames from |build_ids_to_filenames|
+  // by joining on build ids.  If a build id in |build_ids_to_filenames| is not
+  // present in this parser, it is ignored.
+  bool Localize(const std::map<string, string>& build_ids_to_filenames);
+
+  // Same as Localize, but joins on filenames instead of build ids.
+  bool LocalizeUsingFilenames(const std::map<string, string>& filename_map);
+
+  // Stores a list of unique filenames found in MMAP/MMAP2 events into
+  // |filenames|.  Any existing data in |filenames| will be lost.
+  void GetFilenames(std::vector<string>* filenames) const;
+  void GetFilenamesAsSet(std::set<string>* filenames) const;
+
+  // Uses build id events to populate |filenames_to_build_ids|.
+  // Any existing data in |filenames_to_build_ids| will be lost.
+  // Note:  A filename returned by GetFilenames need not be present in this map,
+  // since there may be no build id event corresponding to the MMAP/MMAP2.
+  void GetFilenamesToBuildIDs(
+      std::map<string, string>* filenames_to_build_ids) const;
+
+  static bool IsSupportedEventType(uint32_t type);
+
+  // If a program using PerfReader calls events(), it could work with the
+  // resulting events by importing kernel/perf_internals.h.  This would also
+  // apply to other forms of data (attributes, event types, build ids, etc.)
+  // However, there is no easy way to work with the sample info within events.
+  // The following two methods have been added for this purpose.
+
+  // Extracts from a perf event |event| info about the perf sample that
+  // contains the event.  Stores info in |sample|.
+  bool ReadPerfSampleInfo(const event_t& event,
+                          struct perf_sample* sample) const;
+  // Writes |sample| info back to a perf event |event|.
+  bool WritePerfSampleInfo(const perf_sample& sample,
+                           event_t* event) const;
+
+  // Accessor funcs.
+  const std::vector<PerfFileAttr>& attrs() const {
+    return attrs_;
+  }
+
+  const std::vector<malloced_unique_ptr<event_t>>& events() const {
+    return events_;
+  }
+
+  const std::vector<perf_trace_event_type>& event_types() const {
+    return event_types_;
+  }
+
+  const std::vector<build_id_event*>& build_id_events() const {
+    return build_id_events_;
+  }
+
+  const std::vector<char>& tracing_data() const {
+    return tracing_data_;
+  }
+
+ protected:
+  bool ReadHeader(const ConstBufferWithSize& data);
+
+  bool ReadAttrs(const ConstBufferWithSize& data);
+  bool ReadAttr(const ConstBufferWithSize& data, size_t* offset);
+  bool ReadEventAttr(const ConstBufferWithSize& data, size_t* offset,
+                     perf_event_attr* attr);
+  bool ReadUniqueIDs(const ConstBufferWithSize& data, size_t num_ids,
+                     size_t* offset, std::vector<u64>* ids);
+
+  bool ReadEventTypes(const ConstBufferWithSize& data);
+  bool ReadEventType(const ConstBufferWithSize& data, size_t* offset);
+
+  bool ReadData(const ConstBufferWithSize& data);
+
+  // Reads metadata in normal mode.
+  bool ReadMetadata(const ConstBufferWithSize& data);
+  bool ReadTracingMetadata(const ConstBufferWithSize& data,
+                           size_t offset, size_t size);
+  bool ReadBuildIDMetadata(const ConstBufferWithSize& data, u32 type,
+                           size_t offset, size_t size);
+  bool ReadStringMetadata(const ConstBufferWithSize& data, u32 type,
+                          size_t offset, size_t size);
+  bool ReadUint32Metadata(const ConstBufferWithSize& data, u32 type,
+                          size_t offset, size_t size);
+  bool ReadUint64Metadata(const ConstBufferWithSize& data, u32 type,
+                          size_t offset, size_t size);
+  bool ReadCPUTopologyMetadata(const ConstBufferWithSize& data, u32 type,
+                               size_t offset, size_t size);
+  bool ReadNUMATopologyMetadata(const ConstBufferWithSize& data, u32 type,
+                                size_t offset, size_t size);
+
+  // Read perf data from piped perf output data.
+  bool ReadPipedData(const ConstBufferWithSize& data);
+  bool ReadTracingMetadataEvent(const ConstBufferWithSize& data, size_t offset);
+
+  // Like WriteToPointer, but does not check if the buffer is large enough.
+  bool WriteToPointerWithoutCheckingSize(char* buffer, size_t size);
+
+  bool WriteHeader(const BufferWithSize& data) const;
+  bool WriteAttrs(const BufferWithSize& data) const;
+  bool WriteEventTypes(const BufferWithSize& data) const;
+  bool WriteData(const BufferWithSize& data) const;
+  bool WriteMetadata(const BufferWithSize& data) const;
+
+  // For writing the various types of metadata.
+  bool WriteBuildIDMetadata(u32 type, size_t* offset,
+                            const BufferWithSize& data) const;
+  bool WriteStringMetadata(u32 type, size_t* offset,
+                           const BufferWithSize& data) const;
+  bool WriteUint32Metadata(u32 type, size_t* offset,
+                           const BufferWithSize& data) const;
+  bool WriteUint64Metadata(u32 type, size_t* offset,
+                           const BufferWithSize& data) const;
+  bool WriteEventDescMetadata(u32 type, size_t* offset,
+                              const BufferWithSize& data) const;
+  bool WriteCPUTopologyMetadata(u32 type, size_t* offset,
+                                const BufferWithSize& data) const;
+  bool WriteNUMATopologyMetadata(u32 type, size_t* offset,
+                                 const BufferWithSize& data) const;
+
+  // For reading event blocks within piped perf data.
+  bool ReadAttrEventBlock(const ConstBufferWithSize& data, size_t offset,
+                          size_t size);
+  bool ReadPerfEventBlock(const event_t& event);
+
+  // Returns the number of types of metadata stored.
+  size_t GetNumMetadata() const;
+
+  // For computing the sizes of the various types of metadata.
+  size_t GetBuildIDMetadataSize() const;
+  size_t GetStringMetadataSize() const;
+  size_t GetUint32MetadataSize() const;
+  size_t GetUint64MetadataSize() const;
+  size_t GetEventDescMetadataSize() const;
+  size_t GetCPUTopologyMetadataSize() const;
+  size_t GetNUMATopologyMetadataSize() const;
+
+  // Returns true if we should write the number of strings for the string
+  // metadata of type |type|.
+  bool NeedsNumberOfStringData(u32 type) const;
+
+  // Replaces existing filenames in MMAP/MMAP2 events based on |filename_map|.
+  // This method does not change |build_id_events_|.
+  bool LocalizeMMapFilenames(const std::map<string, string>& filename_map);
+
+  std::vector<PerfFileAttr> attrs_;
+  std::vector<perf_trace_event_type> event_types_;
+  std::vector<malloced_unique_ptr<event_t>> events_;
+  std::vector<build_id_event*> build_id_events_;
+  std::vector<PerfStringMetadata> string_metadata_;
+  std::vector<PerfUint32Metadata> uint32_metadata_;
+  std::vector<PerfUint64Metadata> uint64_metadata_;
+  PerfCPUTopologyMetadata cpu_topology_;
+  std::vector<PerfNodeTopologyMetadata> numa_topology_;
+  std::vector<char> tracing_data_;
+  uint64_t sample_type_;
+  uint64_t read_format_;
+  uint64_t metadata_mask_;
+
+  // Indicates that the perf data being read is from machine with a different
+  // endianness than the current machine.
+  bool is_cross_endian_;
+
+ private:
+  u32 ReadPerfEventAttrSize(const ConstBufferWithSize& data,
+                            size_t attr_offset);
+
+  // The file header is either a normal header or a piped header.
+  union {
+    struct perf_file_header header_;
+    struct perf_pipe_file_header piped_header_;
+  };
+  struct perf_file_header out_header_;
+
+  DISALLOW_COPY_AND_ASSIGN(PerfReader);
+};
+
+}  // namespace quipper
+
+#endif  // CHROMIUMOS_WIDE_PROFILING_PERF_READER_H_
diff --git a/perfprofd/quipper/perf_utils.cc b/perfprofd/quipper/perf_utils.cc
new file mode 100644
index 0000000..4f6fdc3
--- /dev/null
+++ b/perfprofd/quipper/perf_utils.cc
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define LOG_TAG "perf_reader"
+
+#include "perf_utils.h"
+
+#include <sys/stat.h>
+
+#include <cctype>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>  // NOLINT(readability/streams)
+#include <iomanip>
+#include <sstream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace {
+
+// Number of hex digits in a byte.
+const int kNumHexDigitsInByte = 2;
+
+}  // namespace
+
+namespace quipper {
+
+event_t* CallocMemoryForEvent(size_t size) {
+  event_t* event = reinterpret_cast<event_t*>(calloc(1, size));
+  CHECK(event);
+  return event;
+}
+
+build_id_event* CallocMemoryForBuildID(size_t size) {
+  build_id_event* event = reinterpret_cast<build_id_event*>(calloc(1, size));
+  CHECK(event);
+  return event;
+}
+
+string HexToString(const u8* array, size_t length) {
+  // Convert the bytes to hex digits one at a time.
+  // There will be kNumHexDigitsInByte hex digits, and 1 char for NUL.
+  char buffer[kNumHexDigitsInByte + 1];
+  string result = "";
+  for (size_t i = 0; i < length; ++i) {
+    snprintf(buffer, sizeof(buffer), "%02x", array[i]);
+    result += buffer;
+  }
+  return result;
+}
+
+bool StringToHex(const string& str, u8* array, size_t length) {
+  const int kHexRadix = 16;
+  char* err;
+  // Loop through kNumHexDigitsInByte characters at a time (to get one byte)
+  // Stop when there are no more characters, or the array has been filled.
+  for (size_t i = 0;
+       (i + 1) * kNumHexDigitsInByte <= str.size() && i < length;
+       ++i) {
+    string one_byte = str.substr(i * kNumHexDigitsInByte, kNumHexDigitsInByte);
+    array[i] = strtol(one_byte.c_str(), &err, kHexRadix);
+    if (*err)
+      return false;
+  }
+  return true;
+}
+
+uint64_t AlignSize(uint64_t size, uint32_t align_size) {
+  return ((size + align_size - 1) / align_size) * align_size;
+}
+
+// In perf data, strings are packed into the smallest number of 8-byte blocks
+// possible, including the null terminator.
+// e.g.
+//    "0123"                ->  5 bytes -> packed into  8 bytes
+//    "0123456"             ->  8 bytes -> packed into  8 bytes
+//    "01234567"            ->  9 bytes -> packed into 16 bytes
+//    "0123456789abcd"      -> 15 bytes -> packed into 16 bytes
+//    "0123456789abcde"     -> 16 bytes -> packed into 16 bytes
+//    "0123456789abcdef"    -> 17 bytes -> packed into 24 bytes
+//
+// Returns the size of the 8-byte-aligned memory for storing |str|.
+size_t GetUint64AlignedStringLength(const string& str) {
+  return AlignSize(str.size() + 1, sizeof(uint64_t));
+}
+
+uint64_t GetSampleFieldsForEventType(uint32_t event_type,
+                                     uint64_t sample_type) {
+  uint64_t mask = kuint64max;
+  switch (event_type) {
+  case PERF_RECORD_MMAP:
+  case PERF_RECORD_LOST:
+  case PERF_RECORD_COMM:
+  case PERF_RECORD_EXIT:
+  case PERF_RECORD_THROTTLE:
+  case PERF_RECORD_UNTHROTTLE:
+  case PERF_RECORD_FORK:
+  case PERF_RECORD_READ:
+  case PERF_RECORD_MMAP2:
+    // See perf_event.h: struct sample_id and sample_id_all.
+    mask = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID |
+           PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER;
+    break;
+  case PERF_RECORD_SAMPLE:
+  case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
+    break;
+  default:
+    LOG(FATAL) << "Unknown event type " << event_type;
+  }
+  return sample_type & mask;
+}
+
+uint64_t GetPerfSampleDataOffset(const event_t& event) {
+  uint64_t offset = kuint64max;
+  switch (event.header.type) {
+  case PERF_RECORD_SAMPLE:
+    offset = offsetof(event_t, sample.array);
+    break;
+  case PERF_RECORD_MMAP:
+    offset = sizeof(event.mmap) - sizeof(event.mmap.filename) +
+             GetUint64AlignedStringLength(event.mmap.filename);
+    break;
+  case PERF_RECORD_FORK:
+  case PERF_RECORD_EXIT:
+    offset = sizeof(event.fork);
+    break;
+  case PERF_RECORD_COMM:
+    offset = sizeof(event.comm) - sizeof(event.comm.comm) +
+             GetUint64AlignedStringLength(event.comm.comm);
+    break;
+  case PERF_RECORD_LOST:
+    offset = sizeof(event.lost);
+    break;
+  case PERF_RECORD_READ:
+    offset = sizeof(event.read);
+    break;
+  case PERF_RECORD_MMAP2:
+    offset = sizeof(event.mmap2) - sizeof(event.mmap2.filename) +
+             GetUint64AlignedStringLength(event.mmap2.filename);
+    break;
+  case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
+    offset = 0;
+    break;
+  default:
+    LOG(FATAL) << "Unknown/unsupported event type " << event.header.type;
+    break;
+  }
+  // Make sure the offset was valid
+  CHECK_NE(offset, kuint64max);
+  CHECK_EQ(offset % sizeof(uint64_t), 0U);
+  return offset;
+}
+
+bool ReadFileToData(const string& filename, std::vector<char>* data) {
+  std::ifstream in(filename.c_str(), std::ios::binary);
+  if (!in.good()) {
+    LOG(ERROR) << "Failed to open file " << filename;
+    return false;
+  }
+  in.seekg(0, in.end);
+  size_t length = in.tellg();
+  in.seekg(0, in.beg);
+  data->resize(length);
+
+  in.read(&(*data)[0], length);
+
+  if (!in.good()) {
+    LOG(ERROR) << "Error reading from file " << filename;
+    return false;
+  }
+  return true;
+}
+
+bool WriteDataToFile(const std::vector<char>& data, const string& filename) {
+  std::ofstream out(filename.c_str(), std::ios::binary);
+  out.seekp(0, std::ios::beg);
+  out.write(&data[0], data.size());
+  return out.good();
+}
+
+}  // namespace quipper
diff --git a/perfprofd/quipper/perf_utils.h b/perfprofd/quipper/perf_utils.h
new file mode 100644
index 0000000..66f1d9e
--- /dev/null
+++ b/perfprofd/quipper/perf_utils.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROMIUMOS_WIDE_PROFILING_UTILS_H_
+#define CHROMIUMOS_WIDE_PROFILING_UTILS_H_
+
+#include <stdint.h>
+#include <stdlib.h>  // for free()
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+
+#include "perf_internals.h"
+#include "quipper_string.h"
+
+namespace quipper {
+
+struct FreeDeleter {
+  inline void operator()(void* pointer) {
+    free(pointer);
+  }
+};
+
+template <typename T>
+using malloced_unique_ptr = std::unique_ptr<T, FreeDeleter>;
+
+// Given a valid open file handle |fp|, returns the size of the file.
+int64_t GetFileSizeFromHandle(FILE* fp);
+
+event_t* CallocMemoryForEvent(size_t size);
+event_t* ReallocMemoryForEvent(event_t* event, size_t new_size);
+
+build_id_event* CallocMemoryForBuildID(size_t size);
+
+bool FileToBuffer(const string& filename, std::vector<char>* contents);
+
+template <typename CharContainer>
+bool BufferToFile(const string& filename, const CharContainer& contents) {
+  FILE* fp = fopen(filename.c_str(), "wb");
+  if (!fp)
+    return false;
+  // Do not write anything if |contents| contains nothing.  fopen will create
+  // an empty file.
+  if (!contents.empty()) {
+    CHECK_EQ(fwrite(contents.data(),
+                    sizeof(typename CharContainer::value_type),
+                    contents.size(),
+                    fp),
+             contents.size());
+  }
+  fclose(fp);
+  return true;
+}
+
+uint64_t Md5Prefix(const string& input);
+uint64_t Md5Prefix(const std::vector<char>& input);
+
+// Returns a string that represents |array| in hexadecimal.
+string HexToString(const u8* array, size_t length);
+
+// Converts |str| to a hexadecimal number, stored in |array|.  Returns true on
+// success.  Only stores up to |length| bytes - if there are more characters in
+// the string, they are ignored (but the function may still return true).
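+// e.g. StringToHex("0a1b", array, 2) stores the bytes {0x0a, 0x1b} in |array|.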
+bool StringToHex(const string& str, u8* array, size_t length);
+
+// Adjust |size| to blocks of |align_size|.  i.e. returns the smallest multiple
+// of |align_size| that can fit |size|.
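+// e.g. AlignSize(13, 8) returns 16 and AlignSize(16, 8) returns 16.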
+uint64_t AlignSize(uint64_t size, uint32_t align_size);
+
+// Given a general perf sample format |sample_type|, return the fields of that
+// format that are present in a sample for an event of type |event_type|.
+//
+// e.g. FORK and EXIT events have the fields {time, pid/tid, cpu, id}.
+// Given a sample type with fields {ip, time, pid/tid, and period}, return
+// the intersection of these two field sets: {time, pid/tid}.
+//
+// All field formats are bitfields, as defined by enum perf_event_sample_format
+// in kernel/perf_event.h.
+uint64_t GetSampleFieldsForEventType(uint32_t event_type, uint64_t sample_type);
+
+// Returns the offset in bytes within a perf event structure at which the raw
+// perf sample data is located.
+uint64_t GetPerfSampleDataOffset(const event_t& event);
+
+// Returns the size of the 8-byte-aligned memory for storing |str|.
+size_t GetUint64AlignedStringLength(const string& str);
+
+// Returns true iff the file exists.
+bool FileExists(const string& filename);
+
+// Reads the contents of a file into |data|.  Returns true on success, false if
+// it fails.
+bool ReadFileToData(const string& filename, std::vector<char>* data);
+
+// Writes contents of |data| to a file with name |filename|, overwriting any
+// existing file.  Returns true on success, false if it fails.
+bool WriteDataToFile(const std::vector<char>& data, const string& filename);
+
+// Executes |command| and stores stdout output in |output|.  Returns true on
+// success, false otherwise.
+bool RunCommandAndGetStdout(const string& command, std::vector<char>* output);
+
+// Trim leading and trailing whitespace from |str|.
+void TrimWhitespace(string* str);
+
+}  // namespace quipper
+
+#endif  // CHROMIUMOS_WIDE_PROFILING_UTILS_H_
diff --git a/perfprofd/quipper/quipper_string.h b/perfprofd/quipper/quipper_string.h
new file mode 100644
index 0000000..7b0ad1e
--- /dev/null
+++ b/perfprofd/quipper/quipper_string.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUIPPER_STRING_
+#define QUIPPER_STRING_
+
+#ifndef HAS_GLOBAL_STRING
+using std::string;
+using std::stringstream;
+#endif
+
+#endif  // QUIPPER_STRING_
diff --git a/perfprofd/quipper/quipper_test.h b/perfprofd/quipper/quipper_test.h
new file mode 100644
index 0000000..85e8aea
--- /dev/null
+++ b/perfprofd/quipper/quipper_test.h
@@ -0,0 +1,10 @@
+// Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUIPPER_TEST_H_
+#define QUIPPER_TEST_H_
+
+#include <gtest/gtest.h>
+
+#endif  // QUIPPER_TEST_H_
diff --git a/perfprofd/tests/Android.mk b/perfprofd/tests/Android.mk
new file mode 100644
index 0000000..45c2779
--- /dev/null
+++ b/perfprofd/tests/Android.mk
@@ -0,0 +1,58 @@
+# Build the unit tests.
+LOCAL_PATH := $(call my-dir)
+
+perfprofd_test_cppflags := -Wall -Wno-sign-compare -Wno-unused-parameter -Werror -std=gnu++11
+
+#
+# Static library with mockup utilities layer (called by unit test).
+#
+include $(CLEAR_VARS)
+LOCAL_CLANG := true
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_CXX_STL := libc++
+LOCAL_C_INCLUDES += system/extras/perfprofd
+LOCAL_MODULE := libperfprofdmockutils
+LOCAL_CPPFLAGS += $(perfprofd_test_cppflags)
+LOCAL_SRC_FILES := perfprofdmockutils.cc
+include $(BUILD_STATIC_LIBRARY)
+
+#
+# Canned perf.data file needed by unit test.
+#
+include $(CLEAR_VARS)
+LOCAL_MODULE := canned.perf.data
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := DATA
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA)/nativetest/perfprofd_test
+LOCAL_SRC_FILES := canned.perf.data
+include $(BUILD_PREBUILT)
+
+#
+# Second canned perf.data file needed by unit test.
+#
+include $(CLEAR_VARS)
+LOCAL_MODULE := callchain.canned.perf.data
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := DATA
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA)/nativetest/perfprofd_test
+LOCAL_SRC_FILES := callchain.canned.perf.data
+include $(BUILD_PREBUILT)
+
+#
+# Unit test for perfprofd
+#
+include $(CLEAR_VARS)
+LOCAL_CLANG := true
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_CXX_STL := libc++
+LOCAL_STATIC_LIBRARIES := libperfprofdcore libperfprofdmockutils libgtest libbase
+LOCAL_SHARED_LIBRARIES := libprotobuf-cpp-lite
+LOCAL_C_INCLUDES += system/extras/perfprofd external/protobuf/src
+LOCAL_SRC_FILES := perfprofd_test.cc
+LOCAL_CPPFLAGS += $(perfprofd_test_cppflags)
+LOCAL_SHARED_LIBRARIES += libcutils
+LOCAL_MODULE := perfprofd_test
+include $(BUILD_NATIVE_TEST)
+
+# Clean temp vars
+perfprofd_test_cppflags :=
diff --git a/perfprofd/tests/README.txt b/perfprofd/tests/README.txt
new file mode 100644
index 0000000..949e73f
--- /dev/null
+++ b/perfprofd/tests/README.txt
@@ -0,0 +1,64 @@
+Native tests for 'perfprofd'. Please run with
+
+   runtest --path=system/extras/perfprofd/tests
+
+(where runtest == $ANDROID_BUILD_TOP/development/testrunner/runtest.py).
+
+Notes:
+
+1. Several of the testpoints in this unit test perform a live 'simpleperf'
+run on the device (if you are using a userdebug build, simpleperf should
+already be available in /system/xbin/simpleperf).
+
+2. Part of the test is a system-wide profile, meaning that you will
+need to run 'adb root' prior to test execution.
+
+3. The daemon under test, perfprofd, is broken into a main function, a
+"core" library, and a "utils library. Picture:
+
+	+-----------+   perfprofdmain.o
+	| perfprofd |
+	| main()    |   1-liner; calls perfprofd_main()
+	+-----------+
+	   |
+	   v
+	+-----------+   perfprofdcore.a
+	| perfprofd |
+	| core      |   most of the interesting code is here;
+	|           |   calls into utils library for
+	+-----------+   operations such as sleep, log, etc
+	   |
+	   v
+	+-----------+   perfprofdutils.a
+	| perfprofd |
+	| utils     |   real implementations of perfprofd_sleep,
+	|           |   perfprofd_log_* etc
+	+-----------+
+
+Because the daemon tends to spend a lot of time sleeping/waiting,
+it is impractical to try to test it directly. Instead we insert a
+mock utilities layer and then have a test driver that invokes the
+daemon main function. Picture for perfprofd_test:
+
+	+----------------+   perfprofd_test.cc
+	| perfprofd_test |
+	|                |   makes calls into perfprofd_main(),
+	+----------------+   then verifies behavior
+	   |
+	   v
+	+-----------+   perfprofdcore.a
+	| perfprofd |
+	| core      |   same as above
+	+-----------+
+	   |
+	   v
+	+-----------+   perfprofdmockutils.a
+	| perfprofd |
+	| mockutils |   mock implementations of perfprofd_sleep,
+	|           |   perfprofd_log_* etc
+	+-----------+
+
+The mockup versions of perfprofd_sleep() and perfprofd_log_* simply
+log the fact that they are called; the test driver can
+then examine the log to make sure that the daemon is doing
+what it is supposed to be doing.
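+
+As a rough illustration, a mock logging hook might look something like
+the sketch below. This is a minimal sketch only: the function names and
+signatures here are assumed for illustration; the real declarations live
+in perfprofdutils.h and the real mock implementations in
+perfprofdmockutils.cc.
+
+	#include <string>
+
+	// Accumulates everything the daemon logs during a testpoint.
+	static std::string mock_log;
+
+	// Assumed mock hook: record the message instead of writing to the
+	// real log, using the same "I:"/"W:"/"E:" prefixes that appear in
+	// the expected test output.
+	void perfprofd_log_info(const char *msg)
+	{
+	  mock_log += "I: ";
+	  mock_log += msg;
+	  mock_log += "\n";
+	}
+
+	// Accessor used by the test driver to fetch the accumulated log
+	// (return type assumed here).
+	std::string mock_perfprofdutils_getlogged()
+	{
+	  return mock_log;
+	}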
diff --git a/perfprofd/tests/callchain.canned.perf.data b/perfprofd/tests/callchain.canned.perf.data
new file mode 100644
index 0000000..8d84393
--- /dev/null
+++ b/perfprofd/tests/callchain.canned.perf.data
Binary files differ
diff --git a/perfprofd/tests/canned.perf.data b/perfprofd/tests/canned.perf.data
new file mode 100644
index 0000000..e6510d2
--- /dev/null
+++ b/perfprofd/tests/canned.perf.data
Binary files differ
diff --git a/perfprofd/tests/perfprofd_test.cc b/perfprofd/tests/perfprofd_test.cc
new file mode 100644
index 0000000..3114190
--- /dev/null
+++ b/perfprofd/tests/perfprofd_test.cc
@@ -0,0 +1,833 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <cctype>
+#include <string>
+#include <regex>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <android-base/stringprintf.h>
+#include <cutils/properties.h>
+
+#include "perfprofdcore.h"
+#include "configreader.h"
+#include "perfprofdutils.h"
+#include "perfprofdmockutils.h"
+
+#include "perf_profile.pb.h"
+#include "google/protobuf/text_format.h"
+
+//
+// Set to argv[0] on startup
+//
+static const char *executable_path;
+
+//
+// test_dir is the directory containing the test executable and
+// any files associated with the test (will be created by the harness).
+//
+// dest_dir is a subdirectory of test_dir that we'll create on the fly
+// at the start of each testpoint (into which new files can be written),
+// then delete at end of testpoint.
+//
+static std::string test_dir;
+static std::string dest_dir;
+
+// Path to perf executable on device
+#define PERFPATH "/system/bin/perf"
+
+// Temporary config file that we will emit for the daemon to read
+#define CONFIGFILE "perfprofd.conf"
+
+static std::string encoded_file_path(int seq)
+{
+  return android::base::StringPrintf("%s/perf.data.encoded.%d",
+                                     dest_dir.c_str(), seq);
+}
+
+class PerfProfdTest : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    mock_perfprofdutils_init();
+    create_dest_dir();
+    yesclean();
+  }
+
+  virtual void TearDown() {
+    mock_perfprofdutils_finish();
+  }
+
+  void noclean() {
+    clean_ = false;
+  }
+  void yesclean() {
+    clean_ = true;
+  }
+
+ private:
+  bool clean_;
+
+  void create_dest_dir() {
+    setup_dirs();
+    ASSERT_FALSE(dest_dir == "");
+    if (clean_) {
+      std::string cmd("rm -rf ");
+      cmd += dest_dir;
+      system(cmd.c_str());
+    }
+    std::string cmd("mkdir -p ");
+    cmd += dest_dir;
+    system(cmd.c_str());
+  }
+
+  void setup_dirs()
+  {
+    if (test_dir == "") {
+      ASSERT_TRUE(executable_path != nullptr);
+      std::string s(executable_path);
+      auto found = s.find_last_of('/');
+      test_dir = s.substr(0,found);
+      dest_dir = test_dir;
+      dest_dir += "/tmp";
+    }
+  }
+
+};
+
+static bool bothWhiteSpace(char lhs, char rhs)
+{
+  return (std::isspace(lhs) && std::isspace(rhs));
+}
+
+//
+// Squeeze out repeated whitespace from expected/actual logs.
+//
+static std::string squeezeWhite(const std::string &str,
+                                const char *tag,
+                                bool dump=false)
+{
+  if (dump) { fprintf(stderr, "raw %s is %s\n", tag, str.c_str()); }
+  std::string result(str);
+  std::replace( result.begin(), result.end(), '\n', ' ');
+  auto new_end = std::unique(result.begin(), result.end(), bothWhiteSpace);
+  result.erase(new_end, result.end());
+  while (result.begin() != result.end() && std::isspace(*result.rbegin())) {
+    result.pop_back();
+  }
+  if (dump) { fprintf(stderr, "squeezed %s is %s\n", tag, result.c_str()); }
+  return result;
+}
+
+///
+/// Helper class to kick off a run of the perfprofd daemon with a specific
+/// config file.
+///
+class PerfProfdRunner {
+ public:
+  PerfProfdRunner()
+      : config_path_(test_dir)
+  {
+    config_path_ += "/" CONFIGFILE;
+  }
+
+  ~PerfProfdRunner()
+  {
+    remove_processed_file();
+  }
+
+  void addToConfig(const std::string &line)
+  {
+    config_text_ += line;
+    config_text_ += "\n";
+  }
+
+  void remove_semaphore_file()
+  {
+    std::string semaphore(test_dir);
+    semaphore += "/" SEMAPHORE_FILENAME;
+    unlink(semaphore.c_str());
+  }
+
+  void create_semaphore_file()
+  {
+    std::string semaphore(test_dir);
+    semaphore += "/" SEMAPHORE_FILENAME;
+    close(open(semaphore.c_str(), O_WRONLY|O_CREAT));
+  }
+
+  void write_processed_file(int start_seq, int end_seq)
+  {
+    std::string processed = test_dir + "/" PROCESSED_FILENAME;
+    FILE *fp = fopen(processed.c_str(), "w");
+    for (int i = start_seq; i < end_seq; i++) {
+      fprintf(fp, "%d\n", i);
+    }
+    fclose(fp);
+  }
+
+  void remove_processed_file()
+  {
+    std::string processed = test_dir + "/" PROCESSED_FILENAME;
+    unlink(processed.c_str());
+  }
+
+  int invoke()
+  {
+    static const char *argv[3] = { "perfprofd", "-c", "" };
+    argv[2] = config_path_.c_str();
+
+    writeConfigFile(config_path_, config_text_);
+
+    // execute daemon main
+    return perfprofd_main(3, (char **) argv);
+  }
+
+ private:
+  std::string config_path_;
+  std::string config_text_;
+
+  void writeConfigFile(const std::string &config_path,
+                       const std::string &config_text)
+  {
+    FILE *fp = fopen(config_path.c_str(), "w");
+    ASSERT_TRUE(fp != nullptr);
+    fprintf(fp, "%s\n", config_text.c_str());
+    fclose(fp);
+  }
+};
+
+//......................................................................
+
+static void readEncodedProfile(const char *testpoint,
+                               wireless_android_play_playlog::AndroidPerfProfile &encodedProfile)
+{
+  struct stat statb;
+  int perf_data_stat_result = stat(encoded_file_path(0).c_str(), &statb);
+  ASSERT_NE(-1, perf_data_stat_result);
+
+  // read
+  std::string encoded;
+  encoded.resize(statb.st_size);
+  FILE *ifp = fopen(encoded_file_path(0).c_str(), "r");
+  ASSERT_NE(nullptr, ifp);
+  size_t items_read = fread((void*) encoded.data(), statb.st_size, 1, ifp);
+  ASSERT_EQ(1, items_read);
+  fclose(ifp);
+
+  // decode
+  encodedProfile.ParseFromString(encoded);
+}
+
+static std::string encodedLoadModuleToString(const wireless_android_play_playlog::LoadModule &lm)
+{
+  std::stringstream ss;
+  ss << "name: \"" << lm.name() << "\"\n";
+  if (lm.build_id() != "") {
+    ss << "build_id: \"" << lm.build_id() << "\"\n";
+  }
+  return ss.str();
+}
+
+static std::string encodedModuleSamplesToString(const wireless_android_play_playlog::LoadModuleSamples &mod)
+{
+  std::stringstream ss;
+
+  ss << "load_module_id: " << mod.load_module_id() << "\n";
+  for (size_t k = 0; k < mod.address_samples_size(); k++) {
+    const auto &sample = mod.address_samples(k);
+    ss << "  address_samples {\n";
+    for (size_t l = 0; l < mod.address_samples(k).address_size();
+         l++) {
+      auto address = mod.address_samples(k).address(l);
+      ss << "    address: " << address << "\n";
+    }
+    ss << "    count: " << sample.count() << "\n";
+    ss << "  }\n";
+  }
+  return ss.str();
+}
+
+#define RAW_RESULT(x) #x
+
+//
+// Check to see if the log messages emitted by the daemon
+// match the expected result. By default we use a partial
+// match, e.g. if we see the expected excerpt anywhere in the
+// result, it's a match (for exact match, set exact to true)
+//
+static void compareLogMessages(const std::string &actual,
+                               const std::string &expected,
+                               const char *testpoint,
+                               bool exactMatch=false)
+{
+   std::string sqexp = squeezeWhite(expected, "expected");
+   std::string sqact = squeezeWhite(actual, "actual");
+   if (exactMatch) {
+     EXPECT_STREQ(sqexp.c_str(), sqact.c_str());
+   } else {
+     std::size_t foundpos = sqact.find(sqexp);
+     bool wasFound = true;
+     if (foundpos == std::string::npos) {
+       std::cerr << testpoint << ": expected result not found\n";
+       std::cerr << " Actual: \"" << sqact << "\"\n";
+       std::cerr << " Expected: \"" << sqexp << "\"\n";
+       wasFound = false;
+     }
+     EXPECT_TRUE(wasFound);
+   }
+}
+
+TEST_F(PerfProfdTest, MissingGMS)
+{
+  //
+  // AWP requires cooperation between the daemon and the GMS core
+  // piece. If we're running on a device that has an old or damaged
+  // version of GMS core, then the config directory we're interested in
+  // may not be there. This test ensures that the daemon does the
+  // right thing in this case.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  runner.addToConfig("trace_config_read=0");
+  runner.addToConfig("config_directory=/does/not/exist");
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("use_fixed_seed=1");
+  runner.addToConfig("collection_interval=100");
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      I: sleep 90 seconds
+      W: unable to open config directory /does/not/exist: (No such file or directory)
+      I: profile collection skipped (missing config directory)
+                                          );
+
+  // check to make sure entire log matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "MissingGMS");
+}
+
+
+TEST_F(PerfProfdTest, MissingOptInSemaphoreFile)
+{
+  //
+  // Android device owners must opt in to "collect and report usage
+  // data" in order for us to be able to collect profiles. The opt-in
+  // check is performed in the GMS core component; if the check
+  // passes, then it creates a semaphore file for the daemon to pick
+  // up on.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  std::string cfparam("config_directory="); cfparam += test_dir;
+  runner.addToConfig(cfparam);
+  std::string ddparam("destination_directory="); ddparam += dest_dir;
+  runner.addToConfig(ddparam);
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("use_fixed_seed=1");
+  runner.addToConfig("collection_interval=100");
+
+  runner.remove_semaphore_file();
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      I: profile collection skipped (missing semaphore file)
+                                          );
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "MissingOptInSemaphoreFile");
+}
+
+TEST_F(PerfProfdTest, MissingPerfExecutable)
+{
+  //
+  // Perfprofd uses the 'simpleperf' tool to collect profiles
+  // (although this may conceivably change in the future). This test
+  // checks to make sure that if 'simpleperf' is not present we bail out
+  // from collecting profiles.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  runner.addToConfig("trace_config_read=1");
+  std::string cfparam("config_directory="); cfparam += test_dir;
+  runner.addToConfig(cfparam);
+  std::string ddparam("destination_directory="); ddparam += dest_dir;
+  runner.addToConfig(ddparam);
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("use_fixed_seed=1");
+  runner.addToConfig("collection_interval=100");
+  runner.addToConfig("perf_path=/does/not/exist");
+
+  // Create semaphore file
+  runner.create_semaphore_file();
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // expected log contents
+  const std::string expected = RAW_RESULT(
+      I: profile collection skipped (missing 'perf' executable)
+                                          );
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "MissingPerfExecutable");
+}
+
+TEST_F(PerfProfdTest, BadPerfRun)
+{
+  //
+  // Perf tools tend to be tightly coupled with a specific kernel
+  // version -- if things are out of sync perf could fail or
+  // crash. This test makes sure that we detect such a case and log
+  // the error.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  std::string cfparam("config_directory="); cfparam += test_dir;
+  runner.addToConfig(cfparam);
+  std::string ddparam("destination_directory="); ddparam += dest_dir;
+  runner.addToConfig(ddparam);
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("use_fixed_seed=1");
+  runner.addToConfig("collection_interval=100");
+  runner.addToConfig("perf_path=/system/bin/false");
+
+  // Create semaphore file
+  runner.create_semaphore_file();
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      I: profile collection failed (perf record returned bad exit status)
+                                          );
+
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "BadPerfRun");
+}
+
+TEST_F(PerfProfdTest, ConfigFileParsing)
+{
+  //
+  // Gracefully handle malformed items in the config file.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("collection_interval=100");
+  runner.addToConfig("use_fixed_seed=1");
+  runner.addToConfig("destination_directory=/does/not/exist");
+
+  // assorted bad syntax
+  runner.addToConfig("collection_interval=0");
+  runner.addToConfig("collection_interval=-1");
+  runner.addToConfig("collection_interval=2");
+  runner.addToConfig("nonexistent_key=something");
+  runner.addToConfig("no_equals_stmt");
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      W: line 6: specified value 0 for 'collection_interval' outside permitted range [100 4294967295] (ignored)
+      W: line 7: malformed unsigned value (ignored)
+      W: line 8: specified value 2 for 'collection_interval' outside permitted range [100 4294967295] (ignored)
+      W: line 9: unknown option 'nonexistent_key' ignored
+      W: line 10: line malformed (no '=' found)
+                                          );
+
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "ConfigFileParsing");
+}
+
+TEST_F(PerfProfdTest, ProfileCollectionAnnotations)
+{
+  unsigned util1 = collect_cpu_utilization();
+  EXPECT_LE(util1, 100);
+  EXPECT_GE(util1, 0);
+
+  // NB: expectation is that when we run this test, the device will be
+  // completely booted, will be on charger, and will not have the camera
+  // active.
+  EXPECT_FALSE(get_booting());
+  EXPECT_TRUE(get_charging());
+  EXPECT_FALSE(get_camera_active());
+}
+
+TEST_F(PerfProfdTest, BasicRunWithCannedPerf)
+{
+  //
+  // Verify the portion of the daemon that reads and encodes
+  // perf.data files. Here we run the encoder on a canned perf.data
+  // file and verify that the resulting protobuf contains what
+  // we think it should contain.
+  //
+  std::string input_perf_data(test_dir);
+  input_perf_data += "/canned.perf.data";
+
+  // Set up config to avoid these annotations (they are tested elsewhere)
+  ConfigReader config;
+  config.overrideUnsignedEntry("collect_cpu_utilization", 0);
+  config.overrideUnsignedEntry("collect_charging_state", 0);
+  config.overrideUnsignedEntry("collect_camera_active", 0);
+
+  // Kick off encoder and check return code
+  PROFILE_RESULT result =
+      encode_to_proto(input_perf_data, encoded_file_path(0).c_str(), config, 0);
+  EXPECT_EQ(OK_PROFILE_COLLECTION, result);
+
+  // Read and decode the resulting perf.data.encoded file
+  wireless_android_play_playlog::AndroidPerfProfile encodedProfile;
+  readEncodedProfile("BasicRunWithCannedPerf",
+                     encodedProfile);
+
+  // Expect 29 programs
+  EXPECT_EQ(29, encodedProfile.programs_size());
+
+  // Check a couple of load modules
+  { const auto &lm0 = encodedProfile.load_modules(0);
+    std::string act_lm0 = encodedLoadModuleToString(lm0);
+    std::string sqact0 = squeezeWhite(act_lm0, "actual for lm 0");
+    const std::string expected_lm0 = RAW_RESULT(
+        name: "/data/app/com.google.android.apps.plus-1/lib/arm/libcronet.so"
+                                                );
+    std::string sqexp0 = squeezeWhite(expected_lm0, "expected_lm0");
+    EXPECT_STREQ(sqexp0.c_str(), sqact0.c_str());
+  }
+  { const auto &lm9 = encodedProfile.load_modules(9);
+    std::string act_lm9 = encodedLoadModuleToString(lm9);
+    std::string sqact9 = squeezeWhite(act_lm9, "actual for lm 9");
+    const std::string expected_lm9 = RAW_RESULT(
+        name: "/system/lib/libandroid_runtime.so" build_id: "8164ed7b3a8b8f5a220d027788922510"
+                                                );
+    std::string sqexp9 = squeezeWhite(expected_lm9, "expected_lm9");
+    EXPECT_STREQ(sqexp9.c_str(), sqact9.c_str());
+  }
+
+  // Examine some of the samples now
+  { const auto &p1 = encodedProfile.programs(0);
+    const auto &lm1 = p1.modules(0);
+    std::string act_lm1 = encodedModuleSamplesToString(lm1);
+    std::string sqact1 = squeezeWhite(act_lm1, "actual for lm1");
+    const std::string expected_lm1 = RAW_RESULT(
+        load_module_id: 9 address_samples { address: 296100 count: 1 }
+                                                );
+    std::string sqexp1 = squeezeWhite(expected_lm1, "expected_lm1");
+    EXPECT_STREQ(sqexp1.c_str(), sqact1.c_str());
+  }
+  { const auto &p1 = encodedProfile.programs(2);
+    const auto &lm2 = p1.modules(0);
+    std::string act_lm2 = encodedModuleSamplesToString(lm2);
+    std::string sqact2 = squeezeWhite(act_lm2, "actual for lm2");
+    const std::string expected_lm2 = RAW_RESULT(
+        load_module_id: 2
+        address_samples { address: 28030244 count: 1 }
+        address_samples { address: 29657840 count: 1 }
+                                                );
+    std::string sqexp2 = squeezeWhite(expected_lm2, "expected_lm2");
+    EXPECT_STREQ(sqexp2.c_str(), sqact2.c_str());
+  }
+}
+
+TEST_F(PerfProfdTest, CallchainRunWithCannedPerf)
+{
+  //
+  // This test makes sure that the perf.data converter
+  // can handle call chains.
+  //
+  std::string input_perf_data(test_dir);
+  input_perf_data += "/callchain.canned.perf.data";
+
+  // Set up config to avoid these annotations (they are tested elsewhere)
+  ConfigReader config;
+  config.overrideUnsignedEntry("collect_cpu_utilization", 0);
+  config.overrideUnsignedEntry("collect_charging_state", 0);
+  config.overrideUnsignedEntry("collect_camera_active", 0);
+
+  // Kick off encoder and check return code
+  PROFILE_RESULT result =
+      encode_to_proto(input_perf_data, encoded_file_path(0).c_str(), config, 0);
+  EXPECT_EQ(OK_PROFILE_COLLECTION, result);
+
+  // Read and decode the resulting perf.data.encoded file
+  wireless_android_play_playlog::AndroidPerfProfile encodedProfile;
+  readEncodedProfile("BasicRunWithCannedPerf",
+                     encodedProfile);
+
+
+  // Expect 4 programs and 8 load modules
+  EXPECT_EQ(4, encodedProfile.programs_size());
+  EXPECT_EQ(8, encodedProfile.load_modules_size());
+
+  // Check a couple of load modules
+  { const auto &lm0 = encodedProfile.load_modules(0);
+    std::string act_lm0 = encodedLoadModuleToString(lm0);
+    std::string sqact0 = squeezeWhite(act_lm0, "actual for lm 0");
+    const std::string expected_lm0 = RAW_RESULT(
+        name: "/system/bin/dex2oat"
+        build_id: "ee12bd1a1de39422d848f249add0afc4"
+                                                );
+    std::string sqexp0 = squeezeWhite(expected_lm0, "expected_lm0");
+    EXPECT_STREQ(sqexp0.c_str(), sqact0.c_str());
+  }
+  { const auto &lm1 = encodedProfile.load_modules(1);
+    std::string act_lm1 = encodedLoadModuleToString(lm1);
+    std::string sqact1 = squeezeWhite(act_lm1, "actual for lm 1");
+    const std::string expected_lm1 = RAW_RESULT(
+        name: "/system/bin/linker"
+        build_id: "a36715f673a4a0aa76ef290124c516cc"
+                                                );
+    std::string sqexp1 = squeezeWhite(expected_lm1, "expected_lm1");
+    EXPECT_STREQ(sqexp1.c_str(), sqact1.c_str());
+  }
+
+  // Examine some of the samples now
+  { const auto &p0 = encodedProfile.programs(0);
+    const auto &lm1 = p0.modules(0);
+    std::string act_lm1 = encodedModuleSamplesToString(lm1);
+    std::string sqact1 = squeezeWhite(act_lm1, "actual for lm1");
+    const std::string expected_lm1 = RAW_RESULT(
+        load_module_id: 0
+        address_samples { address: 108552 count: 2 }
+                                                );
+    std::string sqexp1 = squeezeWhite(expected_lm1, "expected_lm1");
+    EXPECT_STREQ(sqexp1.c_str(), sqact1.c_str());
+  }
+  { const auto &p4 = encodedProfile.programs(3);
+    const auto &lm2 = p4.modules(1);
+    std::string act_lm2 = encodedModuleSamplesToString(lm2);
+    std::string sqact2 = squeezeWhite(act_lm2, "actual for lm2");
+    const std::string expected_lm2 = RAW_RESULT(
+        load_module_id: 2 address_samples { address: 403913 count: 1 } address_samples { address: 840761 count: 1 } address_samples { address: 846481 count: 1 } address_samples { address: 999053 count: 1 } address_samples { address: 1012959 count: 1 } address_samples { address: 1524309 count: 1 } address_samples { address: 1580779 count: 1 } address_samples { address: 4287986288 count: 1 }
+                                                );
+    std::string sqexp2 = squeezeWhite(expected_lm2, "expected_lm2");
+    EXPECT_STREQ(sqexp2.c_str(), sqact2.c_str());
+  }
+}
+
+TEST_F(PerfProfdTest, BasicRunWithLivePerf)
+{
+  //
+  // Basic test to exercise the main loop of the daemon. It includes
+  // a live 'perf' run.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  std::string ddparam("destination_directory="); ddparam += dest_dir;
+  runner.addToConfig(ddparam);
+  std::string cfparam("config_directory="); cfparam += test_dir;
+  runner.addToConfig(cfparam);
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("use_fixed_seed=12345678");
+  runner.addToConfig("max_unprocessed_profiles=100");
+  runner.addToConfig("collection_interval=9999");
+  runner.addToConfig("sample_duration=2");
+
+  // Create semaphore file
+  runner.create_semaphore_file();
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Read and decode the resulting perf.data.encoded file
+  wireless_android_play_playlog::AndroidPerfProfile encodedProfile;
+  readEncodedProfile("BasicRunWithLivePerf", encodedProfile);
+
+  // Examine what we get back. Since it's a live profile, we can't
+  // really do much in terms of verifying the contents.
+  EXPECT_LT(0, encodedProfile.programs_size());
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      I: starting Android Wide Profiling daemon
+      I: config file path set to /data/nativetest/perfprofd_test/perfprofd.conf
+      I: random seed set to 12345678
+      I: sleep 674 seconds
+      I: initiating profile collection
+      I: profile collection complete
+      I: sleep 9325 seconds
+      I: finishing Android Wide Profiling daemon
+                                          );
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "BasicRunWithLivePerf", true);
+}
+
+TEST_F(PerfProfdTest, MultipleRunWithLivePerf)
+{
+  //
+  // Exercise the main loop of the daemon over multiple iterations,
+  // including live 'perf' runs.
+  //
+  PerfProfdRunner runner;
+  runner.addToConfig("only_debug_build=0");
+  std::string ddparam("destination_directory="); ddparam += dest_dir;
+  runner.addToConfig(ddparam);
+  std::string cfparam("config_directory="); cfparam += test_dir;
+  runner.addToConfig(cfparam);
+  runner.addToConfig("main_loop_iterations=3");
+  runner.addToConfig("use_fixed_seed=12345678");
+  runner.addToConfig("collection_interval=9999");
+  runner.addToConfig("sample_duration=2");
+  runner.write_processed_file(1, 2);
+
+  // Create semaphore file
+  runner.create_semaphore_file();
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Read and decode the resulting perf.data.encoded file
+  wireless_android_play_playlog::AndroidPerfProfile encodedProfile;
+  readEncodedProfile("BasicRunWithLivePerf", encodedProfile);
+
+  // Examine what we get back. Since it's a live profile, we can't
+  // really do much in terms of verifying the contents.
+  EXPECT_LT(0, encodedProfile.programs_size());
+
+  // Verify that encoded.1 has been removed while encoded.0 and encoded.2 still exist.
+  EXPECT_EQ(0, access(encoded_file_path(0).c_str(), F_OK));
+  EXPECT_NE(0, access(encoded_file_path(1).c_str(), F_OK));
+  EXPECT_EQ(0, access(encoded_file_path(2).c_str(), F_OK));
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      I: starting Android Wide Profiling daemon
+      I: config file path set to /data/nativetest/perfprofd_test/perfprofd.conf
+      I: random seed set to 12345678
+      I: sleep 674 seconds
+      I: initiating profile collection
+      I: profile collection complete
+      I: sleep 9325 seconds
+      I: sleep 4974 seconds
+      I: initiating profile collection
+      I: profile collection complete
+      I: sleep 5025 seconds
+      I: sleep 501 seconds
+      I: initiating profile collection
+      I: profile collection complete
+      I: sleep 9498 seconds
+      I: finishing Android Wide Profiling daemon
+                                          );
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "BasicRunWithLivePerf", true);
+}
+
+TEST_F(PerfProfdTest, CallChainRunWithLivePerf)
+{
+  //
+  // Callchain profiles are only supported on certain devices.
+  // For now this test is stubbed out except when run on "angler".
+  //
+  char propBuf[PROPERTY_VALUE_MAX];
+  propBuf[0] = '\0';
+  property_get("ro.hardware", propBuf, "");
+  if (strcmp(propBuf, "angler")) {
+    return;
+  }
+
+  //
+  // Collect a callchain profile, so as to exercise the code in
+  // perf_data post-processing that digests callchains.
+  //
+  PerfProfdRunner runner;
+  std::string ddparam("destination_directory="); ddparam += dest_dir;
+  runner.addToConfig(ddparam);
+  std::string cfparam("config_directory="); cfparam += test_dir;
+  runner.addToConfig(cfparam);
+  runner.addToConfig("main_loop_iterations=1");
+  runner.addToConfig("use_fixed_seed=12345678");
+  runner.addToConfig("max_unprocessed_profiles=100");
+  runner.addToConfig("collection_interval=9999");
+  runner.addToConfig("stack_profile=1");
+  runner.addToConfig("sample_duration=2");
+
+  // Create semaphore file
+  runner.create_semaphore_file();
+
+  // Kick off daemon
+  int daemon_main_return_code = runner.invoke();
+
+  // Check return code from daemon
+  EXPECT_EQ(0, daemon_main_return_code);
+
+  // Read and decode the resulting perf.data.encoded file
+  wireless_android_play_playlog::AndroidPerfProfile encodedProfile;
+  readEncodedProfile("CallChainRunWithLivePerf", encodedProfile);
+
+  // Examine what we get back. Since it's a live profile, we can't
+  // really do much in terms of verifying the contents.
+  EXPECT_LT(0, encodedProfile.programs_size());
+
+  // Verify log contents
+  const std::string expected = RAW_RESULT(
+      I: starting Android Wide Profiling daemon
+      I: config file path set to /data/nativetest/perfprofd_test/perfprofd.conf
+      I: random seed set to 12345678
+      I: sleep 674 seconds
+      I: initiating profile collection
+      I: profile collection complete
+      I: sleep 9325 seconds
+      I: finishing Android Wide Profiling daemon
+                                          );
+  // check to make sure log excerpt matches
+  compareLogMessages(mock_perfprofdutils_getlogged(),
+                     expected, "CallChainRunWithLivePerf", true);
+}
+
+int main(int argc, char **argv) {
+  executable_path = argv[0];
+  // switch to / before starting testing (perfprofd
+  // should be location-independent)
+  chdir("/");
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/perfprofd/tests/perfprofdmockutils.cc b/perfprofd/tests/perfprofdmockutils.cc
new file mode 100644
index 0000000..5af58c4
--- /dev/null
+++ b/perfprofd/tests/perfprofdmockutils.cc
@@ -0,0 +1,106 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "perfprofd"
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <string>
+#include <vector>
+
+#include <utils/Log.h>
+
+#include "perfprofdutils.h"
+
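+// In-memory substitute for the real log: each formatted message is
+// appended here so tests can inspect exactly what the daemon logged.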
+static std::vector<std::string> *mock_log;
+
+static void append_to_log(const std::string &s)
+{
+  assert(mock_log);
+  mock_log->push_back(s);
+}
+
+void mock_perfprofdutils_init()
+{
+  assert(!mock_log);
+  mock_log = new std::vector<std::string>;
+}
+
+void mock_perfprofdutils_finish()
+{
+  assert(mock_log);
+  delete mock_log;
+  mock_log = nullptr;
+}
+
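+// Concatenate everything logged so far and clear the buffer, so each
+// caller sees only the messages emitted since the previous call.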
+std::string mock_perfprofdutils_getlogged()
+{
+  std::string result;
+  assert(mock_log);
+  for (const std::string &s : (*mock_log)) {
+    result += s;
+  }
+  mock_log->clear();
+  return result;
+}
+
+extern "C" {
+
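+// Maximum length of a single formatted mock log entry.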
+#define LMAX 8192
+
+void perfprofd_mocklog(const char *tag, const char *fmt, va_list ap)
+{
+    // Render "<tag><formatted message>\n" and append it to the mock log.
+    char buffer[LMAX];
+    size_t taglen = strlen(tag);
+    strcpy(buffer, tag);
+    vsnprintf(buffer + taglen, LMAX - taglen, fmt, ap);
+    std::string b(buffer); b += "\n";
+    append_to_log(b);
+}
+
+void perfprofd_log_error(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    vfprintf(stderr, fmt, ap); fprintf(stderr, "\n");
+    perfprofd_mocklog("E: ", fmt, ap);
+    va_end(ap);
+}
+
+void perfprofd_log_warning(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    vfprintf(stderr, fmt, ap); fprintf(stderr, "\n");
+    perfprofd_mocklog("W: ", fmt, ap);
+    va_end(ap);
+}
+
+void perfprofd_log_info(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    vfprintf(stderr, fmt, ap); fprintf(stderr, "\n");
+    perfprofd_mocklog("I: ", fmt, ap);
+    va_end(ap);
+}
+
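+// The mock does not actually sleep; it only logs the request so tests
+// can assert on the computed sleep durations.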
+void perfprofd_sleep(int seconds)
+{
+    perfprofd_log_info("sleep %d seconds", seconds);
+}
+
+}
diff --git a/perfprofd/tests/perfprofdmockutils.h b/perfprofd/tests/perfprofdmockutils.h
new file mode 100644
index 0000000..12caabb
--- /dev/null
+++ b/perfprofd/tests/perfprofdmockutils.h
@@ -0,0 +1,31 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <string>
+
+///
+/// Set up mock utilities layer prior to unit test execution
+///
+extern void mock_perfprofdutils_init();
+
+///
+/// Tear down mock utilities layer after unit test execution
+///
+extern void mock_perfprofdutils_finish();
+
+///
+/// Return string containing things logged to logd, plus sleep instances
+///
+extern std::string mock_perfprofdutils_getlogged();