Project import
diff --git a/faux-folly/Android.mk b/faux-folly/Android.mk
new file mode 100644
index 0000000..ab119b0
--- /dev/null
+++ b/faux-folly/Android.mk
@@ -0,0 +1,3 @@
+LOCAL_PATH := $(my-dir)
+
+include $(LOCAL_PATH)/folly/Android.mk
diff --git a/faux-folly/LICENSE b/faux-folly/LICENSE
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/faux-folly/LICENSE
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
diff --git a/faux-folly/MODULE_LICENSE_APACHE b/faux-folly/MODULE_LICENSE_APACHE
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/faux-folly/MODULE_LICENSE_APACHE
diff --git a/faux-folly/README.faux-folly.md b/faux-folly/README.faux-folly.md
new file mode 100644
index 0000000..05e6d6c
--- /dev/null
+++ b/faux-folly/README.faux-folly.md
@@ -0,0 +1,207 @@
+The Faux Folly Manifest: Intent and Implementation
+--------------------------------------------------
+
+Folly is an open-source C++ library developed and used at Facebook.
+
+Faux-Folly is a fork of Folly that removes everything that is not
+required by the Futures library in Folly. It was created by the Nest
+Camera Software team for use in Camera products.
+
+Why?
+----
+
+Folly includes a state-of-the-art C++ Futures/Promises library that is
+thread-friendly and does not impose the use of coroutines. This opens
+up better, less error-prone programming styles when doing asynchronous
+programming. I won't go into them in this doc, but instead refer you
+to the Nest presentation "TDD-lite, Futures, and Promises:"
+
+https://docs.google.com/a/nestlabs.com/presentation/d/1rKi_LhuY26zgzLRREwdkrDmp_qyTdkUfBO2IZvorK_A/edit?usp=sharing
+
+When porting Folly to Brillo/Android, we ran into numerous problems:
+
+    1. The only thing we really want (or need) from Folly is the
+       futures library. Folly is a very broad, boost-like library full
+       of Good Ideas and Bad Ideas and Different Ideas. Developers
+       will constantly try to use the more dubious constructs in the
+       library (e.g. their spin-lock implementation)... and we
+       discourage that.
+
+    2. Brillo/Android uses BoringSSL instead of OpenSSL, and we found
+       it too hard to port the code. BoringSSL is a Google fork of
+       OpenSSL, and is neither ABI nor API compatible with
+       OpenSSL. However, it uses the <openssl/*> header namespace and
+       outputs the libraries libssl.so and libcrypto.so. In other words,
+       they intentionally make it hard for anyone to have BoringSSL
+       and OpenSSL co-existing on the system. Since we have no plans
+       to use the SSL parts of the library, it makes more sense to
+       remove them.
+
+    3. Lots of dependencies. Folly's original dependency list is
+       long; most of those packages have not been ported to
+       Android/Brillo, and most of them are not related to the Futures
+       library. Why ship them on device?
+
+    4. We keep running into problems with Folly because we're
+       violating the assumption that this is an x86_64 server
+       application. In one case, the CacheLocality class unloaded
+       linux-vdso.so.1 because it couldn't find an x86-specific
+       symbol. This breaks things like clock_gettime. Since our team
+       will need to support this library with little or no interaction
+       with Facebook, it needs a smaller surface area to support.
+
+Because of these problems, we chose to fork the library and trim away
+everything that wasn't part of the futures library.  All said and
+done, the fork removes over 115,000 LOC.
+
+So... why not use boost::future<> or std::future<>?
+
+std::future<>: as of C++14, there is still no support for asynchronous
+programming styles/workflows; namely, std::future<> does not support
+continuations. See
+https://channel9.msdn.com/Shows/Going+Deep/C-and-Beyond-2012-Herb-Sutter-Concurrency-and-Parallelism .
+Folly Futures supports this.
+
+boost::future<>: The latest boost thread library does indeed include
+support for asynchronous programming styles (continuations), but the
+destructor of boost::future<> will call wait() if the future is not
+realized. Put another way: you cannot orphan (forget) a
+future. I.e. you can't do this:
+
+```
+void Foo::bar(SomeObject& o)
+{
+    boost::future<int> f = o.get_value();
+    f.then([=](int val) {
+        scoped_lock lock(this->m_mutex);
+        this->m_val = val;
+    });
+}
+```
+
+The continuation (the .then() lambda) has everything. There's no need
+to keep the future, we just need to make sure that the continuation
+fires.
+
+So, with boost this means that you must create some kind of
+garbage-collection mechanism that will hold all of the unrealized
+futures and periodically clean them up as futures are realized. That's
+a lot of bookkeeping, and the purpose of using futures<> is to
+/reduce/ explicit bookkeeping. Folly Futures /does/ support this
+style, though it has a few gaps for doing it "right."
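+
+For illustration, here is the same method written against Folly. This
+is a sketch only; as above, SomeObject is hypothetical and get_value()
+is assumed to return a folly::Future<int>:
+
+```
+void Foo::bar(SomeObject& o)
+{
+    o.get_value().then([=](int val) {
+        scoped_lock lock(this->m_mutex);
+        this->m_val = val;
+    });
+    // The Future returned by .then() is dropped here. Unlike
+    // boost::future, its destructor does not wait(), so the
+    // continuation still fires whenever the value arrives.
+}
+```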
+
+What are the gaps in Folly Futures for orphaning the future?
+
+    1. There is no concept of a compulsory cancellation.  Once you set
+       the continuation... you must ensure that all references stay
+       valid until the continuation fires.  Again... more
+       bookkeeping. However, it's fairly simple to add a cancellation
+       concept to the library.
+
+    2. If the future is set to an exception state (i.e. an error), you
+       must either provide an .onError() callback every time you
+       create a .then() callback, or you will not get a notification
+       of the error. This one is trickier to solve without breaking
+       "normal" (non-orphaning) futures use-cases, but we have found a
+       solution where you can designate an executor on which the
+       exception should fire.
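+
+For the second gap, the existing API at least lets you pair an error
+handler with a continuation explicitly. An illustrative sketch (same
+hypothetical get_value() as above):
+
+```
+o.get_value()
+    .then([=](int val) {
+        // normal path
+    })
+    .onError([](const std::exception& e) {
+        // error path; without this, an orphaned future's exception
+        // would be dropped silently
+    });
+```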
+
+So... why not write our own Futures library? Doesn't Folly do a lot of
+strange things with templates and move semantics that are hard to
+understand?
+
+As we worked with several libraries that implement futures and
+promises, we found that writing a high-quality, high-reliability
+futures library requires a lot of effort and thought to design it well
+and implement it well. All of the implementations do strange tricks
+with templates and move semantics. All of the implementations, too,
+have solutions to corner cases like promise<void> and shared
+futures. They also include libraries for aggregating futures
+(e.g. functions that take a list of futures and return a new future
+that is realized when any or all of the futures in the list have a
+value or error). Some of the code that is hard to
+understand was added because there was a valid use-case that wasn't
+working or because there was a deadlock or race condition.
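+
+In Folly that aggregation looks roughly like this (illustrative only;
+collectAll ships with the futures library):
+
+```
+std::vector<folly::Future<int>> futs = /* ... */;
+folly::collectAll(futs).then(
+    [](const std::vector<folly::Try<int>>& results) {
+        // fires once every future in the list has a value or an error
+    });
+```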
+
+And if we were to write our own, it would end up having almost the
+same API and implementation as Folly... because it is designed well.
+
+How?
+----
+
+The basic process was something along the lines of:
+
+   1. Delete something that is not in the folly/futures/ directory.
+   2. Compile.
+   3. If compile succeeds: run unit tests.
+   4. If unit tests pass: commit!
+   5. If compile or tests fail: skip. (don't delete it)
+
+Once that was done, there were a few libraries that we found to be
+tangled up:
+
+   experimental/fibers: a coroutines library
+   experimental/io: a whole bunch of socket programming stuff
+
+So, we made three (possibly dubious) replacements so that we could
+break this dependency:
+
+    1. Replaced folly::fibers::Baton<> with folly::Baton<>. This isn't
+       a very clean replacement.
+
+    2. Removed folly::RequestContext from Futures. This is some kind of
+       system for attaching void*-like data to futures/promises. It came
+       out cleanly.
+
+    3. The implementation of Futures was using folly/io/async/HHWheelTimer.
+       In order to remove the async library we needed a replacement. So I
+       wrote a class to give equivalent functionality (but not using the
+       wheel timer algorithm).
+
+New components also come with new unit tests.
+
+Pulling Changes from Upstream
+-----------------------------
+
+It is possible to pull changes from upstream. We forked folly at
+version 0.52.0. Up until 0.57.0, merges are fairly straightforward and
+work like this:
+
+    0. Start by compiling for your host machine... not Android.
+
+    1. Do a git merge on the upstream tag. You will get conflicts.
+
+    2. If it is a file we have deleted, keep it deleted. `git
+       status` will say something like "deleted by us."
+
+    3. If it is a new file in a library we have deleted
+       (experimental/*, wangle/*, gen/*, io/async/*), then delete it.
+
+    4. If it is a Makefile, you will need to figure out what is
+       added on "our" branch vs. what is added on "their" branch.
+       Usually this is as simple as ignoring the upstream change
+       if they simply added files that we delete.
+
+    5. For any other kind of conflict, you'll need to put your
+       engineer's hat on and work it out.
+
+    6. Build the code. Ideally, there should be no warnings.
+
+    7. Run all the unit tests. They should all pass.
+
+    8. As much as possible, do only the minimum needed in order to
+       get the code to compile and tests to pass. If you run into
+       a large problem... it is better to break up the changes.
+       Get the merge commit just so that it builds, and then resolve
+       issues on subsequent commits on a mini-branch.
+
+In version 0.57.0, we ran into trouble because our replacement of
+Baton<> in Futures doesn't work as expected for some new test cases.
+
+What's Next?
+------------
+
+We might rename the .so file so that this library can co-exist with
+folly on the same system.
+
+There's probably no need to rename the folly:: namespace.
diff --git a/faux-folly/README.md b/faux-folly/README.md
new file mode 100644
index 0000000..244fc41
--- /dev/null
+++ b/faux-folly/README.md
@@ -0,0 +1,153 @@
+Folly: Facebook Open-source Library
+-----------------------------------
+
+Folly is an open-source C++ library developed and used at Facebook.
+
+### [Get Started](folly/docs/Overview.md)
+
+Folly is published on GitHub at https://github.com/facebook/folly; for
+discussions, there is a Google group at
+https://groups.google.com/d/forum/facebook-folly.
+
+Dependencies
+------------
+
+folly requires gcc 4.8+ and a version of boost compiled with C++11 support.
+
+Please download googletest from
+https://googletest.googlecode.com/files/gtest-1.7.0.zip and unzip it in the
+folly/test subdirectory.
+
+Ubuntu 12.04
+------------
+
+This release is old and requires many upgraded packages. Since Travis CI
+runs on 12.04, `folly/build/deps_ubuntu_12.04.sh` is provided; it upgrades
+all the required packages.
+
+Ubuntu 13.10
+------------
+
+The following packages are required (feel free to cut and paste the apt-get
+command below):
+
+```
+sudo apt-get install \
+    g++ \
+    automake \
+    autoconf \
+    autoconf-archive \
+    libtool \
+    libboost-all-dev \
+    libevent-dev \
+    libdouble-conversion-dev \
+    libgoogle-glog-dev \
+    libgflags-dev \
+    make \
+    binutils-dev \
+    libjemalloc-dev \
+    libssl-dev
+```
+
+Ubuntu 14.04 LTS
+----------------
+
+The packages listed above for Ubuntu 13.10 are required, as well as:
+
+```
+sudo apt-get install \
+    libiberty-dev
+```
+
+The above packages are sufficient for Ubuntu 13.10 and Ubuntu 14.04.
+
+In the folly directory, run
+```
+  autoreconf -ivf
+  ./configure
+  make
+  make check
+  sudo make install
+```
+
+OS X (Homebrew)
+----
+folly is available as a Formula and releases may be built via `brew install folly`.
+
+You may also use `folly/build/bootstrap-osx-homebrew.sh` to build against `master`:
+
+```
+  cd folly
+  ./build/bootstrap-osx-homebrew.sh
+  make
+  make check
+```
+
+OS X (MacPorts)
+----
+Install the required packages from MacPorts:
+
+```
+  sudo port install \
+    autoconf \
+    automake \
+    boost \
+    gflags \
+    git \
+    google-glog \
+    libevent \
+    libtool \
+    scons
+```
+
+Download and install double-conversion:
+
+```
+  git clone https://github.com/google/double-conversion.git
+  cd double-conversion
+  cmake -DBUILD_SHARED_LIBS=ON .
+  make
+  sudo make install
+```
+
+Download and install folly with the parameters listed below:
+
+```
+  git clone https://github.com/facebook/folly.git
+  cd folly/folly
+  autoreconf -ivf
+  ./configure CPPFLAGS="-I/opt/local/include" LDFLAGS="-L/opt/local/lib"
+  make
+  sudo make install
+```
+
+Other Linux distributions
+-------------------------
+
+- double-conversion (https://github.com/google/double-conversion)
+
+  Download and build double-conversion.
+  You may need to tell configure where to find it.
+
+  [double-conversion/] `ln -s src double-conversion`
+
+  [folly/] `./configure LDFLAGS=-L$DOUBLE_CONVERSION_HOME/ CPPFLAGS=-I$DOUBLE_CONVERSION_HOME/`
+
+  [folly/] `LD_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/ make`
+
+- additional platform specific dependencies:
+
+  Fedora 21 64-bit
+    - gcc
+    - gcc-c++
+    - autoconf
+    - autoconf-archive
+    - automake
+    - boost-devel
+    - libtool
+    - glog-devel
+    - gflags-devel
+    - scons
+    - double-conversion-devel
+    - openssl-devel
+    - libevent-devel
diff --git a/faux-folly/folly/Android.mk b/faux-folly/folly/Android.mk
new file mode 100644
index 0000000..4357fac
--- /dev/null
+++ b/faux-folly/folly/Android.mk
@@ -0,0 +1,262 @@
+LOCAL_PATH := $(my-dir)
+
+FOLLY_LIBFOLLYBASE_SOURCES := \
+	Conv.cpp \
+	Demangle.cpp \
+	android/src/EscapeTables.cpp \
+	Format.cpp \
+	android/src/FormatTables.cpp \
+	Malloc.cpp \
+	Range.cpp \
+	StringBase.cpp \
+	String.cpp \
+	detail/FunctionalExcept.cpp
+
+FOLLY_LIBFOLLY_SOURCES = \
+	detail/CacheLocality.cpp \
+	dynamic.cpp \
+	File.cpp \
+	FileUtil.cpp \
+	futures/detail/ThreadWheelTimekeeper.cpp \
+	futures/detail/TimerMap.cpp \
+	futures/Barrier.cpp \
+	futures/ThreadedExecutor.cpp \
+	futures/Future.cpp \
+	futures/InlineExecutor.cpp \
+	futures/ManualExecutor.cpp \
+	futures/QueuedImmediateExecutor.cpp \
+	futures/ScheduledExecutor.cpp \
+	detail/Futex.cpp \
+	LifoSem.cpp \
+	io/IOBuf.cpp \
+	io/IOBufQueue.cpp \
+	detail/MemoryIdler.cpp \
+	Random.cpp \
+	SpookyHashV1.cpp \
+	SpookyHashV2.cpp \
+	stats/Instantiations.cpp \
+	Version.cpp
+
+###
+### libfollybase
+###
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/android/build/faux-folly-common.mk
+LOCAL_SRC_FILES := \
+	$(FOLLY_LIBFOLLYBASE_SOURCES)
+LOCAL_MODULE := libfollybase
+include $(BUILD_STATIC_LIBRARY)
+
+###
+### libfolly
+###
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/android/build/faux-folly-common.mk
+# See https://groups.google.com/forum/#!topic/android-ndk/6TR4MA7LxYg
+# the Android defaults trigger the problem at CacheLocality.cpp:235
+LOCAL_CFLAGS += -fno-function-sections -fno-data-sections
+LOCAL_SRC_FILES := \
+	$(FOLLY_LIBFOLLY_SOURCES)
+LOCAL_WHOLE_STATIC_LIBRARIES += libfollybase
+LOCAL_STATIC_LIBRARIES += libjemalloc
+LOCAL_MODULE := libfolly
+include $(BUILD_SHARED_LIBRARY)
+
+###
+### libfollybenchmark
+###
+include $(CLEAR_VARS)
+include $(LOCAL_PATH)/android/build/faux-folly-common.mk
+LOCAL_SRC_FILES := Benchmark.cpp
+LOCAL_SHARED_LIBRARIES += libboost_regex \
+	libfolly
+LOCAL_MODULE := libfollybenchmark
+include $(BUILD_STATIC_LIBRARY)
+
+###
+### UNIT TESTS
+###
+
+FAUX_FOLLY_UNIT_TESTS :=
+FAUX_FOLLY_BUILD_TEST_EXECUTABLE := $(LOCAL_PATH)/android/build/faux-folly-test-case.mk
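+
+# Each test case below sets LOCAL_SRC_FILES/LOCAL_MODULE and then
+# includes faux-folly-test-case.mk which, we assume (it is not shown
+# in this diff), builds the test executable and appends LOCAL_MODULE
+# to FAUX_FOLLY_UNIT_TESTS so that the runner script at the bottom can
+# pull every test in via LOCAL_REQUIRED_MODULES.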
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/ForeachTest.cpp
+LOCAL_MODULE := foreach_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/HashTest.cpp
+LOCAL_MODULE := hash_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/FBStringTest.cpp
+LOCAL_MODULE := fbstring_test_using_jemalloc
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/ThreadCachedIntTest.cpp
+LOCAL_MODULE := thread_cached_int_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/ThreadLocalTest.cpp
+LOCAL_SHARED_LIBRARIES += libboost_thread \
+	libboost_system
+LOCAL_MODULE := thread_local_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/FBVectorTest.cpp
+LOCAL_MODULE := fbvector_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+# fails due to cout
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/DynamicTest.cpp
+LOCAL_MODULE := dynamic_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/BenchmarkTest.cpp
+LOCAL_MODULE := benchmark_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+# fails due to destructor
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/ScopeGuardTest.cpp
+LOCAL_MODULE := scope_guard_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/ConvTest.cpp
+LOCAL_MODULE := conv_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/RangeTest.cpp
+LOCAL_MODULE := range_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/BitsTest.cpp
+LOCAL_MODULE := bits_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/BitIteratorTest.cpp
+LOCAL_MODULE := bit_iterator_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/EndianTest.cpp
+LOCAL_MODULE := endian_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/HistogramTest.cpp
+LOCAL_MODULE := histogram_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/MapUtilTest.cpp
+LOCAL_MODULE := map_util_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/StringTest.cpp
+LOCAL_MODULE := string_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/FormatTest.cpp
+LOCAL_MODULE := format_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/PortabilityTest.cpp
+LOCAL_MODULE := portability_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/SpookyHashV1Test.cpp
+LOCAL_MODULE := spooky_hash_v1_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/SpookyHashV2Test.cpp
+LOCAL_MODULE := spooky_hash_v2_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/CancellationTest.cpp
+LOCAL_MODULE := cancellation_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/MPMCQueueTest.cpp \
+	test/DeterministicSchedule.cpp
+LOCAL_MODULE := mpmc_queue_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/FutexTest.cpp \
+	test/DeterministicSchedule.cpp
+LOCAL_MODULE := futex_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := test/DeterministicScheduleTest.cpp \
+	test/DeterministicSchedule.cpp
+LOCAL_MODULE := deterministic_schedule_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+	futures/test/CancellationTest.cpp \
+	futures/test/CollectTest.cpp \
+	futures/test/ThreadedExecutorTest.cpp \
+	futures/test/EnsureTest.cpp \
+	futures/test/ExecutorTest.cpp \
+	futures/test/FSMTest.cpp \
+	futures/test/FilterTest.cpp \
+	futures/test/FutureTest.cpp \
+	futures/test/HeaderCompileTest.cpp \
+	futures/test/InterruptTest.cpp \
+	futures/test/MapTest.cpp \
+	futures/test/PollTest.cpp \
+	futures/test/PromiseTest.cpp \
+	futures/test/ReduceTest.cpp \
+	futures/test/SharedPromiseTest.cpp \
+	futures/test/ThenCompileTest.cpp \
+	futures/test/ThenTest.cpp \
+	futures/test/TimekeeperTest.cpp \
+	futures/test/TimerMapTest.cpp \
+	futures/test/TryTest.cpp \
+	futures/test/UnitTest.cpp \
+	futures/test/UnwrapTest.cpp \
+	futures/test/ViaTest.cpp \
+	futures/test/WaitTest.cpp \
+	futures/test/WillEqualTest.cpp \
+	futures/test/WindowTest.cpp
+LOCAL_SHARED_LIBRARIES += libboost_thread \
+	libboost_system
+LOCAL_MODULE := futures_test
+include $(FAUX_FOLLY_BUILD_TEST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := android/test/faux-folly-tests.sh
+LOCAL_MODULE_CLASS := EXECUTABLES
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA_NATIVE_TESTS)/faux-folly
+LOCAL_REQUIRED_MODULES := $(FAUX_FOLLY_UNIT_TESTS)
+LOCAL_MODULE := faux-folly-tests.sh
+include $(BUILD_PREBUILT)
+
+###
+### Clean the environment...
+### the EPA loves us!
+###
+override FOLLY_LIBFOLLYBASE_SOURCES :=
+override FOLLY_LIBFOLLY_SOURCES :=
diff --git a/faux-folly/folly/ApplyTuple.h b/faux-folly/folly/ApplyTuple.h
new file mode 100644
index 0000000..a592704
--- /dev/null
+++ b/faux-folly/folly/ApplyTuple.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Defines a function folly::applyTuple, which takes a function and a
+ * std::tuple of arguments and calls the function with those
+ * arguments.
+ *
+ * Example:
+ *
+ *    int x = folly::applyTuple(std::plus<int>(), std::make_tuple(12, 12));
+ *    ASSERT(x == 24);
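+ *
+ *    Pointers to member functions also work; the object (or a pointer
+ *    to it) goes first in the tuple. Illustrative only:
+ *
+ *       struct Adder { int add(int a, int b) { return a + b; } };
+ *       Adder obj;
+ *       int y = folly::applyTuple(&Adder::add, std::make_tuple(&obj, 1, 2));
+ *       ASSERT(y == 3);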
+ */
+
+#ifndef FOLLY_APPLYTUPLE_H_
+#define FOLLY_APPLYTUPLE_H_
+
+#include <tuple>
+#include <functional>
+#include <type_traits>
+
+namespace folly {
+
+//////////////////////////////////////////////////////////////////////
+
+namespace detail {
+
+// This is to allow using this with pointers to member functions,
+// where the first argument in the tuple will be the this pointer.
+template<class F> F& makeCallable(F& f) { return f; }
+template<class R, class C, class ...A>
+auto makeCallable(R (C::*d)(A...)) -> decltype(std::mem_fn(d)) {
+  return std::mem_fn(d);
+}
+
+template<class Tuple>
+struct DerefSize
+  : std::tuple_size<typename std::remove_reference<Tuple>::type>
+{};
+
+template<class Tuple, class ...Unpacked> struct ExprDoUnpack {
+  enum {
+    value = sizeof...(Unpacked) < DerefSize<Tuple>::value
+  };
+};
+
+template<class Tuple, class ...Unpacked> struct ExprIsUnpacked {
+  enum {
+    value = sizeof...(Unpacked) == DerefSize<Tuple>::value
+  };
+};
+
+// CallTuple recursively unpacks tuple arguments so we can forward
+// them into the function.
+template<class Ret>
+struct CallTuple {
+  template<class F, class Tuple, class ...Unpacked>
+  static typename std::enable_if<ExprDoUnpack<Tuple, Unpacked...>::value,
+    Ret
+  >::type call(const F& f, Tuple&& t, Unpacked&&... unp) {
+    typedef typename std::tuple_element<
+      sizeof...(Unpacked),
+      typename std::remove_reference<Tuple>::type
+    >::type ElementType;
+    return CallTuple<Ret>::call(f, std::forward<Tuple>(t),
+      std::forward<Unpacked>(unp)...,
+      std::forward<ElementType>(std::get<sizeof...(Unpacked)>(t))
+    );
+  }
+
+  template<class F, class Tuple, class ...Unpacked>
+  static typename std::enable_if<ExprIsUnpacked<Tuple, Unpacked...>::value,
+    Ret
+  >::type call(const F& f, Tuple&& t, Unpacked&&... unp) {
+    return makeCallable(f)(std::forward<Unpacked>(unp)...);
+  }
+};
+
+// The point of this meta function is to extract the contents of the
+// tuple as a parameter pack so we can pass it into std::result_of<>.
+template<class F, class Args> struct ReturnValue;
+template<class F, class ...Args>
+struct ReturnValue<F,std::tuple<Args...>> {
+  typedef typename std::result_of<F (Args...)>::type type;
+};
+
+} // namespace detail
+
+//////////////////////////////////////////////////////////////////////
+
+template<class Callable, class Tuple>
+typename detail::ReturnValue<
+  typename std::decay<Callable>::type,
+  typename std::decay<Tuple>::type
+>::type
+applyTuple(const Callable& c, Tuple&& t) {
+  typedef typename detail::ReturnValue<
+    typename std::decay<Callable>::type,
+    typename std::decay<Tuple>::type
+  >::type RetT;
+  return detail::CallTuple<RetT>::call(c, std::forward<Tuple>(t));
+}
+
+//////////////////////////////////////////////////////////////////////
+
+} // namespace folly
+
+#endif
diff --git a/faux-folly/folly/AtomicStruct.h b/faux-folly/folly/AtomicStruct.h
new file mode 100644
index 0000000..ce1dedd
--- /dev/null
+++ b/faux-folly/folly/AtomicStruct.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_ATOMIC_STRUCT_H_
+#define FOLLY_ATOMIC_STRUCT_H_
+
+#include <atomic>
+#include <type_traits>
+#include <folly/Traits.h>
+#include <string.h>
+#include <stdint.h>
+
+namespace folly {
+
+namespace detail {
+template <int N> struct AtomicStructIntPick {};
+}
+
+/// AtomicStruct<T> works like C++ atomics, but can be used on any POD
+/// type <= 8 bytes.
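+///
+/// A small illustrative sketch (a 4-byte POD packed into one atomic word):
+///
+///   struct Pos { int16_t x, y; };
+///   folly::AtomicStruct<Pos> p(Pos{0, 0});
+///   Pos expected = p.load();
+///   p.compare_exchange_strong(expected, Pos{1, 2});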
+template <
+    typename T,
+    template<typename> class Atom = std::atomic,
+    typename Raw = typename detail::AtomicStructIntPick<sizeof(T)>::type>
+class AtomicStruct {
+  static_assert(alignof(T) <= alignof(Raw),
+      "target type can't have stricter alignment than matching int");
+  static_assert(sizeof(T) <= sizeof(Raw),
+      "underlying type isn't big enough");
+  static_assert(std::is_trivial<T>::value ||
+                folly::IsTriviallyCopyable<T>::value,
+      "target type must be trivially copyable");
+
+  Atom<Raw> data;
+
+  static Raw encode(T v) noexcept {
+    // we expect the compiler to optimize away the memcpy, but without
+    // it we would violate strict aliasing rules
+    Raw d = 0;
+    memcpy(&d, &v, sizeof(T));
+    return d;
+  }
+
+  static T decode(Raw d) noexcept {
+    T v;
+    memcpy(&v, &d, sizeof(T));
+    return v;
+  }
+
+ public:
+  AtomicStruct() = default;
+  ~AtomicStruct() = default;
+  AtomicStruct(AtomicStruct<T> const &) = delete;
+  AtomicStruct<T>& operator= (AtomicStruct<T> const &) = delete;
+
+  constexpr /* implicit */ AtomicStruct(T v) noexcept : data(encode(v)) {}
+
+  bool is_lock_free() const noexcept {
+    return data.is_lock_free();
+  }
+
+  bool compare_exchange_strong(
+          T& v0, T v1,
+          std::memory_order mo = std::memory_order_seq_cst) noexcept {
+    Raw d0 = encode(v0);
+    bool rv = data.compare_exchange_strong(d0, encode(v1), mo);
+    if (!rv) {
+      v0 = decode(d0);
+    }
+    return rv;
+  }
+
+  bool compare_exchange_weak(
+          T& v0, T v1,
+          std::memory_order mo = std::memory_order_seq_cst) noexcept {
+    Raw d0 = encode(v0);
+    bool rv = data.compare_exchange_weak(d0, encode(v1), mo);
+    if (!rv) {
+      v0 = decode(d0);
+    }
+    return rv;
+  }
+
+  T exchange(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
+    return decode(data.exchange(encode(v), mo));
+  }
+
+  /* implicit */ operator T () const noexcept {
+    return decode(data);
+  }
+
+  T load(std::memory_order mo = std::memory_order_seq_cst) const noexcept {
+    return decode(data.load(mo));
+  }
+
+  T operator= (T v) noexcept {
+    return decode(data = encode(v));
+  }
+
+  void store(T v, std::memory_order mo = std::memory_order_seq_cst) noexcept {
+    data.store(encode(v), mo);
+  }
+
+  // std::atomic also provides volatile versions of all of the access
+  // methods.  These are callable on volatile objects, and also can
+  // theoretically have different implementations than their non-volatile
+  // counterpart.  If someone wants them here they can easily be added
+  // by duplicating the above code and the corresponding unit tests.
+};
+
+namespace detail {
+
+template <> struct AtomicStructIntPick<1> { typedef uint8_t type; };
+template <> struct AtomicStructIntPick<2> { typedef uint16_t type; };
+template <> struct AtomicStructIntPick<3> { typedef uint32_t type; };
+template <> struct AtomicStructIntPick<4> { typedef uint32_t type; };
+template <> struct AtomicStructIntPick<5> { typedef uint64_t type; };
+template <> struct AtomicStructIntPick<6> { typedef uint64_t type; };
+template <> struct AtomicStructIntPick<7> { typedef uint64_t type; };
+template <> struct AtomicStructIntPick<8> { typedef uint64_t type; };
+
+} // namespace detail
+
+} // namespace folly
+
+#endif
diff --git a/faux-folly/folly/Baton.h b/faux-folly/folly/Baton.h
new file mode 100644
index 0000000..6600079
--- /dev/null
+++ b/faux-folly/folly/Baton.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_BATON_H
+#define FOLLY_BATON_H
+
+#include <stdint.h>
+#include <atomic>
+#include <boost/noncopyable.hpp>
+#include <errno.h>
+#include <assert.h>
+
+#include <folly/detail/Futex.h>
+#include <folly/detail/MemoryIdler.h>
+
+namespace folly {
+
+/// A Baton allows a thread to block once and be awoken: it captures
+/// a single handoff.  During its lifecycle (from construction/reset to
+/// destruction/reset) a baton must either be post()ed and wait()ed exactly
+/// once each, or not at all.
+///
+/// Baton includes no internal padding, and is only 4 bytes in size.
+/// Any alignment or padding to avoid false sharing is up to the user.
+///
+/// This is basically a stripped-down semaphore that supports only a
+/// single call to sem_post and a single call to sem_wait.  The current
+/// posix semaphore sem_t isn't too bad, but this provides a bit more
+/// speed, inlining, smaller size, a guarantee that the implementation
+/// won't change, and compatibility with DeterministicSchedule.  By having
+/// a much more restrictive lifecycle we can also add a bunch of assertions
+/// that can help to catch race conditions ahead of time.
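+///
+/// A minimal usage sketch: one post() and one wait() per lifetime.
+///
+///   folly::Baton<> b;
+///   std::thread waker([&] { b.post(); });
+///   b.wait();      // returns once post() has happened
+///   waker.join();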
+template <template<typename> class Atom = std::atomic>
+struct Baton {
+  Baton() : state_(INIT) {}
+
+  /* using boost::noncopyable increases the object size by 4 bytes */
+  Baton(const Baton&) = delete;
+  Baton& operator=(const Baton&) = delete;
+  Baton(Baton&&) = delete;
+  Baton& operator=(Baton&&) = delete;
+
+  /// It is an error to destroy a Baton on which a thread is currently
+  /// wait()ing.  In practice this means that the waiter usually takes
+  /// responsibility for destroying the Baton.
+  ~Baton() {
+    // The docblock for this function says that it can't be called when
+    // there is a concurrent waiter.  We assume a strong version of this
+    // requirement in which the caller must _know_ that this is true, they
+    // are not allowed to be merely lucky.  If two threads are involved,
+    // the destroying thread must actually have synchronized with the
+    // waiting thread after wait() returned.  To convey causality, the
+    // waiting thread must have used release semantics and the destroying
+    // thread must have used acquire semantics for that communication,
+    // so we are guaranteed to see the post-wait() value of state_,
+    // which cannot be WAITING.
+    //
+    // Note that since we only care about a single memory location,
+    // the only two plausible memory orders here are relaxed and seq_cst.
+    assert(state_.load(std::memory_order_relaxed) != WAITING);
+  }
+
+  /// Equivalent to destroying the Baton and creating a new one.  It is
+  /// a bug to call this while there is a waiting thread, so in practice
+  /// the waiter will be the one that resets the baton.
+  void reset() {
+    // See ~Baton for a discussion about why relaxed is okay here
+    assert(state_.load(std::memory_order_relaxed) != WAITING);
+
+    // We use a similar argument to justify the use of a relaxed store
+    // here.  Since both wait() and post() are required to be called
+    // only once per lifetime, no thread can actually call those methods
+    // correctly after a reset() unless it synchronizes with the thread
+    // that performed the reset().  If a post() or wait() on another thread
+    // didn't synchronize, then regardless of what operation we performed
+    // here there would be a race on proper use of the Baton's spec
+    // (although not on any particular load and store).  Put another way,
+    // we don't need to synchronize here because anybody that might rely
+    // on such synchronization is required by the baton rules to perform
+    // an additional synchronization that has the desired effect anyway.
+    //
+    // There is actually a similar argument to be made about the
+    // constructor, in which the fenceless constructor initialization
+    // of state_ is piggybacked on whatever synchronization mechanism
+    // distributes knowledge of the Baton's existence
+    state_.store(INIT, std::memory_order_relaxed);
+  }
+
+  /// Causes wait() to wake up.  For each lifetime of a Baton (where a
+  /// lifetime starts at construction or reset() and ends at destruction
+  /// or reset()) there can be at most one call to post().  Any thread
+  /// may call post().
+  ///
+  /// Although we could implement more generic semaphore semantics
+  /// without any extra size or CPU overhead, the single-call limitation
+  /// allows us to have better assertions during debug builds.
+  void post() {
+    uint32_t before = state_.load(std::memory_order_acquire);
+
+    assert(before == INIT || before == WAITING || before == TIMED_OUT);
+
+    if (before == INIT &&
+        state_.compare_exchange_strong(before, EARLY_DELIVERY)) {
+      return;
+    }
+
+    assert(before == WAITING || before == TIMED_OUT);
+
+    if (before == TIMED_OUT) {
+      return;
+    }
+
+    assert(before == WAITING);
+    state_.store(LATE_DELIVERY, std::memory_order_release);
+    state_.futexWake(1);
+  }
+
+  /// Waits until post() has been called in the current Baton lifetime.
+  /// May be called at most once during a Baton lifetime (construction
+  /// |reset until destruction|reset).  If post is called before wait in
+  /// the current lifetime then this method returns immediately.
+  ///
+  /// The restriction that there can be at most one wait() per lifetime
+  /// could be relaxed somewhat without any perf or size regressions,
+  /// but by making this condition very restrictive we can provide better
+  /// checking in debug builds.
+  void wait() {
+    if (spinWaitForEarlyDelivery()) {
+      assert(state_.load(std::memory_order_acquire) == EARLY_DELIVERY);
+      return;
+    }
+
+    // guess we have to block :(
+    uint32_t expected = INIT;
+    if (!state_.compare_exchange_strong(expected, WAITING)) {
+      // CAS failed, last minute reprieve
+      assert(expected == EARLY_DELIVERY);
+      return;
+    }
+
+    while (true) {
+      detail::MemoryIdler::futexWait(state_, WAITING);
+
+      // state_ is the truth even if FUTEX_WAIT reported a matching
+      // FUTEX_WAKE, since we aren't using type-stable storage and we
+      // don't guarantee reuse.  The scenario goes like this: thread
+      // A's last touch of a Baton is a call to wake(), which stores
+      // LATE_DELIVERY and gets an unlucky context switch before delivering
+      // the corresponding futexWake.  Thread B sees LATE_DELIVERY
+      // without consuming a futex event, because it calls futexWait
+      // with an expected value of WAITING and hence doesn't go to sleep.
+      // B returns, so the Baton's memory is reused and becomes another
+      // Baton (or a reuse of this one).  B calls futexWait on the new
+      // Baton lifetime, then A wakes up and delivers a spurious futexWake
+      // to the same memory location.  B's futexWait will then report a
+      // consumed wake event even though state_ is still WAITING.
+      //
+      // It would be possible to add an extra state_ dance to communicate
+      // that the futexWake has been sent so that we can be sure to consume
+      // it before returning, but that would be a perf and complexity hit.
+      uint32_t s = state_.load(std::memory_order_acquire);
+      assert(s == WAITING || s == LATE_DELIVERY);
+
+      if (s == LATE_DELIVERY) {
+        return;
+      }
+      // retry
+    }
+  }
+
+  /// Similar to wait, but with a timeout. The thread is unblocked if the
+  /// timeout expires.
+  /// Note: Only a single call to timed_wait/wait is allowed during a baton's
+  /// life-cycle (from construction/reset to destruction/reset). In other
+  /// words, after timed_wait the caller can't invoke wait/timed_wait/try_wait
+  /// again on the same baton without resetting it.
+  ///
+  /// @param  deadline      Time until which the thread can block
+  /// @return               true if the baton was posted to before timeout,
+  ///                       false otherwise
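+  ///
+  /// Illustrative use:
+  ///
+  ///   if (!baton.timed_wait(std::chrono::steady_clock::now() +
+  ///                         std::chrono::milliseconds(10))) {
+  ///     // timed out; a post() later in this lifetime becomes a no-op
+  ///   }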
+  template <typename Clock, typename Duration = typename Clock::duration>
+  bool timed_wait(const std::chrono::time_point<Clock,Duration>& deadline) {
+    if (spinWaitForEarlyDelivery()) {
+      assert(state_.load(std::memory_order_acquire) == EARLY_DELIVERY);
+      return true;
+    }
+
+    // guess we have to block :(
+    uint32_t expected = INIT;
+    if (!state_.compare_exchange_strong(expected, WAITING)) {
+      // CAS failed, last minute reprieve
+      assert(expected == EARLY_DELIVERY);
+      return true;
+    }
+
+    while (true) {
+      auto rv = state_.futexWaitUntil(WAITING, deadline);
+      if (rv == folly::detail::FutexResult::TIMEDOUT) {
+        state_.store(TIMED_OUT, std::memory_order_release);
+        return false;
+      }
+
+      uint32_t s = state_.load(std::memory_order_acquire);
+      assert(s == WAITING || s == LATE_DELIVERY);
+      if (s == LATE_DELIVERY) {
+        return true;
+      }
+    }
+  }
+
+  /// Similar to wait, but doesn't block the thread if it hasn't been posted.
+  ///
+  /// try_wait has the following semantics:
+  /// - It is ok to call try_wait any number times on the same baton until
+  ///   try_wait reports that the baton has been posted.
+  /// - It is ok to call timed_wait or wait on the same baton if try_wait
+  ///   reports that baton hasn't been posted.
+  /// - If try_wait indicates that the baton has been posted, it is invalid to
+  ///   call wait, try_wait or timed_wait on the same baton without resetting
+  ///
+  /// @return       true if baton has been posted, false otherwise
+  bool try_wait() {
+    auto s = state_.load(std::memory_order_acquire);
+    assert(s == INIT || s == EARLY_DELIVERY);
+    return s == EARLY_DELIVERY;
+  }
+
+ private:
+  enum State : uint32_t {
+    INIT = 0,
+    EARLY_DELIVERY = 1,
+    WAITING = 2,
+    LATE_DELIVERY = 3,
+    TIMED_OUT = 4
+  };
+
+  enum {
+    // Must be positive.  If multiple threads are actively using a
+    // higher-level data structure that uses batons internally, it is
+    // likely that the post() and wait() calls happen almost at the same
+    // time.  In this state, we lose big 50% of the time if the wait goes
+    // to sleep immediately.  On circa-2013 devbox hardware it costs about
+    // 7 usec to FUTEX_WAIT and then be awoken (half the t/iter as the
+    // posix_sem_pingpong test in BatonTests).  We can improve our chances
+    // of EARLY_DELIVERY by spinning for a bit, although we have to balance
+    // this against the loss if we end up sleeping any way.  Spins on this
+    // hw take about 7 nanos (all but 0.5 nanos is the pause instruction).
+    // We give ourselves 300 spins, which is about 2 usec of waiting.  As a
+    // partial consolation, since we are using the pause instruction we
+    // are giving a speed boost to the colocated hyperthread.
+    PreBlockAttempts = 300,
+  };
+
+  // Spin for "some time" (see discussion on PreBlockAttempts) waiting
+  // for a post.
+  //
+  // @return       true if we received an early delivery during the wait,
+  //               false otherwise. If the function returns true then
+  //               state_ is guaranteed to be EARLY_DELIVERY
+  bool spinWaitForEarlyDelivery() {
+
+    static_assert(PreBlockAttempts > 0,
+        "isn't this assert clearer than an uninitialized variable warning?");
+    for (int i = 0; i < PreBlockAttempts; ++i) {
+      if (try_wait()) {
+        // hooray!
+        return true;
+      }
+      // The pause instruction is the polite way to spin, but it doesn't
+      // actually affect correctness to omit it if we don't have it.
+      // Pausing donates the full capabilities of the current core to
+      // its other hyperthreads for a dozen cycles or so
+      asm_volatile_pause();
+    }
+
+    return false;
+  }
+
+  detail::Futex<Atom> state_;
+};
+
+} // namespace folly
+
+#endif
diff --git a/faux-folly/folly/Benchmark.cpp b/faux-folly/folly/Benchmark.cpp
new file mode 100644
index 0000000..458e689
--- /dev/null
+++ b/faux-folly/folly/Benchmark.cpp
@@ -0,0 +1,462 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// @author Andrei Alexandrescu (andrei.alexandrescu@fb.com)
+
+#include <folly/Benchmark.h>
+#include <folly/Foreach.h>
+#include <folly/String.h>
+
+#include <algorithm>
+#include <boost/regex.hpp>
+#include <cmath>
+#include <iostream>
+#include <limits>
+#include <utility>
+#include <vector>
+#include <cstring>
+
+using namespace std;
+
+DEFINE_bool(benchmark, false, "Run benchmarks.");
+
+DEFINE_string(bm_regex, "",
+              "Only benchmarks whose names match this regex will be run.");
+
+DEFINE_int64(bm_min_usec, 100,
+             "Minimum # of microseconds we'll accept for each benchmark.");
+
+DEFINE_int64(bm_min_iters, 1,
+             "Minimum # of iterations we'll try for each benchmark.");
+
+DEFINE_int32(bm_max_secs, 1,
+             "Maximum # of seconds we'll spend on each benchmark.");
+
+
+namespace folly {
+
+BenchmarkSuspender::NanosecondsSpent BenchmarkSuspender::nsSpent;
+
+typedef function<detail::TimeIterPair(unsigned int)> BenchmarkFun;
+
+
+vector<tuple<const char*, const char*, BenchmarkFun>>& benchmarks() {
+  static vector<tuple<const char*, const char*, BenchmarkFun>> _benchmarks;
+  return _benchmarks;
+}
+
+#define FB_FOLLY_GLOBAL_BENCHMARK_BASELINE fbFollyGlobalBenchmarkBaseline
+#define FB_STRINGIZE_X2(x) FB_STRINGIZE(x)
+
+// Add the global baseline
+BENCHMARK(FB_FOLLY_GLOBAL_BENCHMARK_BASELINE) {
+#ifdef _MSC_VER
+  _ReadWriteBarrier();
+#else
+  asm volatile("");
+#endif
+}
+
+int getGlobalBenchmarkBaselineIndex() {
+  const char *global = FB_STRINGIZE_X2(FB_FOLLY_GLOBAL_BENCHMARK_BASELINE);
+  auto it = std::find_if(
+    benchmarks().begin(),
+    benchmarks().end(),
+    [global](const tuple<const char*, const char*, BenchmarkFun> &v) {
+      return std::strcmp(get<1>(v), global) == 0;
+    }
+  );
+  CHECK(it != benchmarks().end());
+  return it - benchmarks().begin();
+}
+
+#undef FB_STRINGIZE_X2
+#undef FB_FOLLY_GLOBAL_BENCHMARK_BASELINE
+
+void detail::addBenchmarkImpl(const char* file, const char* name,
+                              BenchmarkFun fun) {
+  benchmarks().emplace_back(file, name, std::move(fun));
+}
+
+/**
+ * Given a point, gives density at that point as a number 0.0 < x <=
+ * 1.0. The result is 1.0 if all samples are equal to where, and
+ * decreases near 0 if all points are far away from it. The density is
+ * computed with the help of a radial basis function.
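+ * Concretely, with a Gaussian kernel of bandwidth h over samples s_i,
+ * this computes:
+ *
+ *   density(x) = (1/n) * sum_i exp(-((s_i - x) / h)^2)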
+ */
+static double density(const double * begin, const double *const end,
+                      const double where, const double bandwidth) {
+  assert(begin < end);
+  assert(bandwidth > 0.0);
+  double sum = 0.0;
+  FOR_EACH_RANGE (i, begin, end) {
+    auto d = (*i - where) / bandwidth;
+    sum += exp(- d * d);
+  }
+  return sum / (end - begin);
+}
+
+/**
+ * Computes the mean and standard deviation for a set of data points.
+ * Note that the mean is currently not being used.
+ */
+static pair<double, double>
+meanVariance(const double * begin, const double *const end) {
+  assert(begin < end);
+  double sum = 0.0, sum2 = 0.0;
+  FOR_EACH_RANGE (i, begin, end) {
+    sum += *i;
+    sum2 += *i * *i;
+  }
+  auto const n = end - begin;
+  return make_pair(sum / n, sqrt((sum2 - sum * sum / n) / n));
+}
+
+/**
+ * Computes the mode of a sample set through brute force. Assumes
+ * input is sorted.
+ */
+static double mode(const double * begin, const double *const end) {
+  assert(begin < end);
+  // Lower bound and upper bound for result and their respective
+  // densities.
+  auto
+    result = 0.0,
+    bestDensity = 0.0;
+
+  // Get the variance so we pass it down to density()
+  auto const sigma = meanVariance(begin, end).second;
+  if (!sigma) {
+    // No variance means constant signal
+    return *begin;
+  }
+
+  FOR_EACH_RANGE (i, begin, end) {
+    assert(i == begin || *i >= i[-1]);
+    auto candidate = density(begin, end, *i, sigma * sqrt(2.0));
+    if (candidate > bestDensity) {
+      // Found a new best
+      bestDensity = candidate;
+      result = *i;
+    } else {
+      // Density is decreasing... we could break here if we definitely
+      // knew this is unimodal.
+    }
+  }
+
+  return result;
+}
+
+/**
+ * Given a bunch of benchmark samples, estimate the actual run time.
+ */
+static double estimateTime(double * begin, double * end) {
+  assert(begin < end);
+
+  // Current state of the art: get the minimum. After some
+  // experimentation, it seems taking the minimum is the best.
+
+  return *min_element(begin, end);
+
+  // The (currently unreachable) code below estimates the time as the
+  // mode of the distribution; it is kept for comparison purposes.
+
+  // Select the awesomest (i.e. most frequent) result. We do this by
+  // sorting and then computing the longest run length.
+  sort(begin, end);
+
+  // Eliminate outliers. A time much larger than the minimum time is
+  // considered an outlier.
+  while (end[-1] > 2.0 * *begin) {
+    --end;
+    if (begin == end) {
+      LOG(INFO) << *begin;
+    }
+    assert(begin < end);
+  }
+
+  double result = 0;
+
+  /* Code used just for comparison purposes */ {
+    unsigned bestFrequency = 0;
+    unsigned candidateFrequency = 1;
+    double candidateValue = *begin;
+    for (auto current = begin + 1; ; ++current) {
+      if (current == end || *current != candidateValue) {
+        // Done with the current run, see if it was best
+        if (candidateFrequency > bestFrequency) {
+          bestFrequency = candidateFrequency;
+          result = candidateValue;
+        }
+        if (current == end) {
+          break;
+        }
+        // Start a new run
+        candidateValue = *current;
+        candidateFrequency = 1;
+      } else {
+        // Cool, inside a run, increase the frequency
+        ++candidateFrequency;
+      }
+    }
+  }
+
+  result = mode(begin, end);
+
+  return result;
+}
+
+static double runBenchmarkGetNSPerIteration(const BenchmarkFun& fun,
+                                            const double globalBaseline) {
+  // The key here is accuracy; numbers that are too low mean the
+  // measurement was too coarse. We up the ante until we get to at
+  // least minNanoseconds timings.
+  static uint64_t resolutionInNs = 0;
+  if (!resolutionInNs) {
+    timespec ts;
+    CHECK_EQ(0, clock_getres(detail::DEFAULT_CLOCK_ID, &ts));
+    CHECK_EQ(0, ts.tv_sec) << "Clock sucks.";
+    CHECK_LT(0, ts.tv_nsec) << "Clock too fast for its own good.";
+    CHECK_EQ(1, ts.tv_nsec) << "Clock too coarse, upgrade your kernel.";
+    resolutionInNs = ts.tv_nsec;
+  }
+  // We choose a minimum minimum (sic) of 100,000 nanoseconds, but if
+  // the clock resolution is worse than that, it will be larger. In
+  // essence we're aiming at making the quantization noise 0.001%.
+  static const auto minNanoseconds =
+    max<uint64_t>(FLAGS_bm_min_usec * 1000UL,
+        min<uint64_t>(resolutionInNs * 100000, 1000000000ULL));
+
+  // We do measurements in several epochs and take the minimum, to
+  // account for jitter.
+  static const unsigned int epochs = 1000;
+  // We establish a total time budget as we don't want a measurement
+  // to take too long. This will curtail the number of actual epochs.
+  const uint64_t timeBudgetInNs = FLAGS_bm_max_secs * 1000000000ULL;
+  timespec global;
+  CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &global));
+
+  double epochResults[epochs] = { 0 };
+  size_t actualEpochs = 0;
+
+  for (; actualEpochs < epochs; ++actualEpochs) {
+    for (unsigned int n = FLAGS_bm_min_iters; n < (1UL << 30); n *= 2) {
+      auto const nsecsAndIter = fun(n);
+      if (nsecsAndIter.first < minNanoseconds) {
+        continue;
+      }
+      // We got an accurate enough timing; record the baseline-adjusted
+      // per-iteration time for this epoch (clamped at zero).
+      epochResults[actualEpochs] = max(0.0, double(nsecsAndIter.first) /
+                                       nsecsAndIter.second - globalBaseline);
+      // Done with the current epoch, we got a meaningful timing.
+      break;
+    }
+    timespec now;
+    CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &now));
+    if (detail::timespecDiff(now, global) >= timeBudgetInNs) {
+      // No more time budget available.
+      ++actualEpochs;
+      break;
+    }
+  }
+
+  // If the benchmark was basically drowned in baseline noise, it's
+  // possible it became negative.
+  return max(0.0, estimateTime(epochResults, epochResults + actualEpochs));
+}
+
+struct ScaleInfo {
+  double boundary;
+  const char* suffix;
+};
+
+static const ScaleInfo kTimeSuffixes[] {
+  { 365.25 * 24 * 3600, "years" },
+  { 24 * 3600, "days" },
+  { 3600, "hr" },
+  { 60, "min" },
+  { 1, "s" },
+  { 1E-3, "ms" },
+  { 1E-6, "us" },
+  { 1E-9, "ns" },
+  { 1E-12, "ps" },
+  { 1E-15, "fs" },
+  { 0, nullptr },
+};
+
+static const ScaleInfo kMetricSuffixes[] {
+  { 1E24, "Y" },  // yotta
+  { 1E21, "Z" },  // zetta
+  { 1E18, "X" },  // "exa" written with suffix 'X' so as to not create
+                  //   confusion with scientific notation
+  { 1E15, "P" },  // peta
+  { 1E12, "T" },  // terra
+  { 1E9, "G" },   // giga
+  { 1E6, "M" },   // mega
+  { 1E3, "K" },   // kilo
+  { 1, "" },
+  { 1E-3, "m" },  // milli
+  { 1E-6, "u" },  // micro
+  { 1E-9, "n" },  // nano
+  { 1E-12, "p" }, // pico
+  { 1E-15, "f" }, // femto
+  { 1E-18, "a" }, // atto
+  { 1E-21, "z" }, // zepto
+  { 1E-24, "y" }, // yocto
+  { 0, nullptr },
+};
+
+static string humanReadable(double n, unsigned int decimals,
+                            const ScaleInfo* scales) {
+  if (std::isinf(n) || std::isnan(n)) {
+    return folly::to<string>(n);
+  }
+
+  const double absValue = fabs(n);
+  const ScaleInfo* scale = scales;
+  while (absValue < scale[0].boundary && scale[1].suffix != nullptr) {
+    ++scale;
+  }
+
+  const double scaledValue = n / scale->boundary;
+  return stringPrintf("%.*f%s", decimals, scaledValue, scale->suffix);
+}
+
+static string readableTime(double n, unsigned int decimals) {
+  return humanReadable(n, decimals, kTimeSuffixes);
+}
+
+static string metricReadable(double n, unsigned int decimals) {
+  return humanReadable(n, decimals, kMetricSuffixes);
+}
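+
+// Illustrative outputs (computed from the tables above, not from the
+// original source): readableTime(3.2e-6, 2) == "3.20us" and
+// metricReadable(1234.0, 2) == "1.23K".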
+
+static void printBenchmarkResultsAsTable(
+  const vector<tuple<const char*, const char*, double> >& data) {
+  // Width available
+  static const unsigned int columns = 76;
+
+  // Compute the longest benchmark name
+  size_t longestName = 0;
+  FOR_EACH_RANGE (i, 1, benchmarks().size()) {
+    longestName = max(longestName, strlen(get<1>(benchmarks()[i])));
+  }
+
+  // Print a horizontal rule
+  auto separator = [&](char pad) {
+    puts(string(columns, pad).c_str());
+  };
+
+  // Print header for a file
+  auto header = [&](const char* file) {
+    separator('=');
+    printf("%-*srelative  time/iter  iters/s\n",
+           columns - 28, file);
+    separator('=');
+  };
+
+  double baselineNsPerIter = numeric_limits<double>::max();
+  const char* lastFile = "";
+
+  for (auto& datum : data) {
+    auto file = get<0>(datum);
+    if (strcmp(file, lastFile)) {
+      // New file starting
+      header(file);
+      lastFile = file;
+    }
+
+    string s = get<1>(datum);
+    if (s == "-") {
+      separator('-');
+      continue;
+    }
+    bool useBaseline /* = void */;
+    if (s[0] == '%') {
+      s.erase(0, 1);
+      useBaseline = true;
+    } else {
+      baselineNsPerIter = get<2>(datum);
+      useBaseline = false;
+    }
+    s.resize(columns - 29, ' ');
+    auto nsPerIter = get<2>(datum);
+    auto secPerIter = nsPerIter / 1E9;
+    auto itersPerSec = 1 / secPerIter;
+    if (!useBaseline) {
+      // Print without baseline
+      printf("%*s           %9s  %7s\n",
+             static_cast<int>(s.size()), s.c_str(),
+             readableTime(secPerIter, 2).c_str(),
+             metricReadable(itersPerSec, 2).c_str());
+    } else {
+      // Print with baseline
+      auto rel = baselineNsPerIter / nsPerIter * 100.0;
+      printf("%*s %7.2f%%  %9s  %7s\n",
+             static_cast<int>(s.size()), s.c_str(),
+             rel,
+             readableTime(secPerIter, 2).c_str(),
+             metricReadable(itersPerSec, 2).c_str());
+    }
+  }
+  separator('=');
+}
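+
+// The function above renders roughly like this (numbers illustrative):
+//
+// ============================================================================
+// my_benchmarks.cpp                               relative  time/iter  iters/s
+// ============================================================================
+// insertVectorBegin                                            1.06us  940.59K
+// insertListBegin                                  109.64%   970.14ns    1.03M
+// ============================================================================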
+
+static void printBenchmarkResults(
+  const vector<tuple<const char*, const char*, double> >& data) {
+
+  printBenchmarkResultsAsTable(data);
+}
+
+void runBenchmarks() {
+  CHECK(!benchmarks().empty());
+
+  vector<tuple<const char*, const char*, double>> results;
+  results.reserve(benchmarks().size() - 1);
+
+  std::unique_ptr<boost::regex> bmRegex;
+  if (!FLAGS_bm_regex.empty()) {
+    bmRegex.reset(new boost::regex(FLAGS_bm_regex));
+  }
+
+  // PLEASE KEEP QUIET. MEASUREMENTS IN PROGRESS.
+
+  unsigned int baselineIndex = getGlobalBenchmarkBaselineIndex();
+
+  auto const globalBaseline =
+      runBenchmarkGetNSPerIteration(get<2>(benchmarks()[baselineIndex]), 0);
+  FOR_EACH_RANGE (i, 0, benchmarks().size()) {
+    if (i == baselineIndex) {
+      continue;
+    }
+    double elapsed = 0.0;
+    if (strcmp(get<1>(benchmarks()[i]), "-") != 0) { // skip separators
+      if (bmRegex && !boost::regex_search(get<1>(benchmarks()[i]), *bmRegex)) {
+        continue;
+      }
+      elapsed = runBenchmarkGetNSPerIteration(get<2>(benchmarks()[i]),
+                                              globalBaseline);
+    }
+    results.emplace_back(get<0>(benchmarks()[i]),
+                         get<1>(benchmarks()[i]), elapsed);
+  }
+
+  // PLEASE MAKE NOISE. MEASUREMENTS DONE.
+
+  printBenchmarkResults(results);
+}
+
+} // namespace folly
diff --git a/faux-folly/folly/Benchmark.h b/faux-folly/folly/Benchmark.h
new file mode 100644
index 0000000..c25ca28
--- /dev/null
+++ b/faux-folly/folly/Benchmark.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_BENCHMARK_H_
+#define FOLLY_BENCHMARK_H_
+
+#include <folly/ScopeGuard.h>
+#include <folly/Portability.h>
+#include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE
+#include <cassert>
+#include <ctime>
+#include <boost/function_types/function_arity.hpp>
+#include <functional>
+#include <glog/logging.h>
+#include <gflags/gflags.h>
+#include <limits>
+#include <type_traits>
+
+DECLARE_bool(benchmark);
+
+namespace folly {
+
+/**
+ * Runs all benchmarks defined. Usually put in main().
+ */
+void runBenchmarks();
+
+/**
+ * Runs all benchmarks defined if and only if the --benchmark flag has
+ * been passed to the program. Usually put in main().
+ */
+inline bool runBenchmarksOnFlag() {
+  if (FLAGS_benchmark) {
+    runBenchmarks();
+  }
+  return FLAGS_benchmark;
+}
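+
+/**
+ * A minimal sketch of a typical main() (assuming gflags-style flag
+ * parsing; adapt the init call to your gflags namespace/version):
+ *
+ *   int main(int argc, char** argv) {
+ *     gflags::ParseCommandLineFlags(&argc, &argv, true);
+ *     folly::runBenchmarksOnFlag();
+ *     return 0;
+ *   }
+ */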
+
+namespace detail {
+
+/**
+ * This is the clock ID used for measuring time. On older kernels, the
+ * resolution of this clock will be very coarse, which will cause the
+ * benchmarks to fail.
+ */
+enum Clock { DEFAULT_CLOCK_ID = CLOCK_REALTIME };
+
+typedef std::pair<uint64_t, unsigned int> TimeIterPair;
+
+/**
+ * Adds a benchmark wrapped in a std::function. Only used
+ * internally. Pass by value is intentional.
+ */
+void addBenchmarkImpl(const char* file,
+                      const char* name,
+                      std::function<TimeIterPair(unsigned int)>);
+
+/**
+ * Takes the difference between two timespec values. end is assumed to
+ * occur after start.
+ */
+inline uint64_t timespecDiff(timespec end, timespec start) {
+  if (end.tv_sec == start.tv_sec) {
+    assert(end.tv_nsec >= start.tv_nsec);
+    return end.tv_nsec - start.tv_nsec;
+  }
+  assert(end.tv_sec > start.tv_sec);
+  auto diff = uint64_t(end.tv_sec - start.tv_sec);
+  assert(diff <
+         std::numeric_limits<uint64_t>::max() / 1000000000UL);
+  return diff * 1000000000UL
+    + end.tv_nsec - start.tv_nsec;
+}
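+
+// Worked example (illustrative): end = {2, 100}, start = {1, 900000000}
+// gives 1 * 1000000000 + 100 - 900000000 == 100000100 ns.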
+
+/**
+ * Takes the difference between two sets of timespec values. The first
+ * two come from a high-resolution clock whereas the other two come
+ * from a low-resolution clock. The crux of the matter is that
+ * high-res values may be bogus as documented in
+ * http://linux.die.net/man/3/clock_gettime. The trouble is when the
+ * running process migrates from one CPU to another, which is more
+ * likely for long-running processes. Therefore we watch for high
+ * differences between the two timings.
+ *
+ * This function is subject to further improvements.
+ */
+inline uint64_t timespecDiff(timespec end, timespec start,
+                             timespec endCoarse, timespec startCoarse) {
+  auto fine = timespecDiff(end, start);
+  auto coarse = timespecDiff(endCoarse, startCoarse);
+  if (coarse - fine >= 1000000) {
+    // The fine time is in all likelihood bogus
+    return coarse;
+  }
+  return fine;
+}
+
+} // namespace detail
+
+/**
+ * Supporting type for BENCHMARK_SUSPEND defined below.
+ */
+struct BenchmarkSuspender {
+  BenchmarkSuspender() {
+    CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
+  }
+
+  BenchmarkSuspender(const BenchmarkSuspender &) = delete;
+  BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
+    start = rhs.start;
+    rhs.start.tv_nsec = rhs.start.tv_sec = 0;
+  }
+
+  BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
+  BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
+    if (start.tv_nsec > 0 || start.tv_sec > 0) {
+      tally();
+    }
+    start = rhs.start;
+    rhs.start.tv_nsec = rhs.start.tv_sec = 0;
+    return *this;
+  }
+
+  ~BenchmarkSuspender() {
+    if (start.tv_nsec > 0 || start.tv_sec > 0) {
+      tally();
+    }
+  }
+
+  void dismiss() {
+    assert(start.tv_nsec > 0 || start.tv_sec > 0);
+    tally();
+    start.tv_nsec = start.tv_sec = 0;
+  }
+
+  void rehire() {
+    assert(start.tv_nsec == 0 && start.tv_sec == 0);
+    CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
+  }
+
+  template <class F>
+  auto dismissing(F f) -> typename std::result_of<F()>::type {
+    SCOPE_EXIT { rehire(); };
+    dismiss();
+    return f();
+  }
+
+  /**
+   * This is for use inside of if-conditions, as in the BENCHMARK
+   * macros: the contextual conversion in an if-condition bypasses
+   * the explicit qualifier on operator bool.
+   */
+  explicit operator bool() const {
+    return false;
+  }
+
+  /**
+   * Accumulates nanoseconds spent outside benchmark.
+   */
+  typedef uint64_t NanosecondsSpent;
+  static NanosecondsSpent nsSpent;
+
+private:
+  void tally() {
+    timespec end;
+    CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &end));
+    nsSpent += detail::timespecDiff(end, start);
+    start = end;
+  }
+
+  timespec start;
+};
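+
+// Usage sketch (illustrative; makeInput() and process() are
+// hypothetical helpers): while a BenchmarkSuspender is live, elapsed
+// time is credited to nsSpent and subtracted from the benchmark;
+// dismissing() re-enables timing just for the passed callable:
+//
+//   BENCHMARK(processInput, n) {
+//     BenchmarkSuspender braces;
+//     auto input = makeInput(n);        // not timed
+//     braces.dismissing([&] {
+//       process(input);                 // timed
+//     });
+//   }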
+
+/**
+ * Adds a benchmark. Usually not called directly but instead through
+ * the macro BENCHMARK defined below. The lambda function involved
+ * must take exactly one parameter of type unsigned, and the benchmark
+ * uses it with counter semantics (iteration occurs inside the
+ * function).
+ */
+template <typename Lambda>
+typename std::enable_if<
+  boost::function_types::function_arity<decltype(&Lambda::operator())>::value
+  == 2
+>::type
+addBenchmark(const char* file, const char* name, Lambda&& lambda) {
+  auto execute = [=](unsigned int times) {
+    BenchmarkSuspender::nsSpent = 0;
+    timespec start, end;
+    unsigned int niter;
+
+    // CORE MEASUREMENT STARTS
+    auto const r1 = clock_gettime(detail::DEFAULT_CLOCK_ID, &start);
+    niter = lambda(times);
+    auto const r2 = clock_gettime(detail::DEFAULT_CLOCK_ID, &end);
+    // CORE MEASUREMENT ENDS
+
+    CHECK_EQ(0, r1);
+    CHECK_EQ(0, r2);
+
+    return detail::TimeIterPair(
+      detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
+      niter);
+  };
+
+  detail::addBenchmarkImpl(file, name,
+    std::function<detail::TimeIterPair(unsigned int)>(execute));
+}
+
+/**
+ * Adds a benchmark. Usually not called directly but instead through
+ * the macro BENCHMARK defined below. The lambda function involved
+ * must take zero parameters, and the benchmark calls it repeatedly
+ * (iteration occurs outside the function).
+ */
+template <typename Lambda>
+typename std::enable_if<
+  boost::function_types::function_arity<decltype(&Lambda::operator())>::value
+  == 1
+>::type
+addBenchmark(const char* file, const char* name, Lambda&& lambda) {
+  addBenchmark(file, name, [=](unsigned int times) {
+      unsigned int niter = 0;
+      while (times-- > 0) {
+        niter += lambda();
+      }
+      return niter;
+    });
+}
+
+/**
+ * Call doNotOptimizeAway(var) against variables that you use for
+ * benchmarking but otherwise are useless. The compiler tends to do a
+ * good job at eliminating unused variables, and this function fools
+ * it into thinking var is in fact needed.
+ */
+#ifdef _MSC_VER
+
+#pragma optimize("", off)
+
+template <class T>
+void doNotOptimizeAway(T&& datum) {
+  datum = datum;
+}
+
+#pragma optimize("", on)
+
+#elif defined(__clang__)
+
+template <class T>
+__attribute__((__optnone__)) void doNotOptimizeAway(T&& datum) {
+}
+
+#else
+
+template <class T>
+void doNotOptimizeAway(T&& datum) {
+  asm volatile("" : "+r" (datum));
+}
+
+#endif
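+
+// Usage sketch: keep a benchmark's result observable so the loop isn't
+// optimized away (computeChecksum() is a hypothetical function):
+//
+//   BENCHMARK(checksum, n) {
+//     unsigned s = 0;
+//     FOR_EACH_RANGE (i, 0, n) {
+//       s += computeChecksum(i);
+//     }
+//     folly::doNotOptimizeAway(s);
+//   }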
+
+} // namespace folly
+
+/**
+ * Introduces a benchmark function. Used internally, see BENCHMARK and
+ * friends below.
+ */
+#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName)   \
+  static void funName(paramType);                                       \
+  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = (           \
+    ::folly::addBenchmark(__FILE__, stringName,                         \
+      [](paramType paramName) -> unsigned { funName(paramName);         \
+                                            return rv; }),              \
+    true);                                                              \
+  static void funName(paramType paramName)
+
+/**
+ * Introduces a benchmark function with support for returning the actual
+ * number of iterations. Used internally, see BENCHMARK_MULTI and friends
+ * below.
+ */
+#define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
+  static unsigned funName(paramType);                                   \
+  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = (           \
+    ::folly::addBenchmark(__FILE__, stringName,                         \
+      [](paramType paramName) { return funName(paramName); }),          \
+    true);                                                              \
+  static unsigned funName(paramType paramName)
+
+/**
+ * Introduces a benchmark function. Use with either one or two arguments.
+ * The first is the name of the benchmark. Use something descriptive, such
+ * as insertVectorBegin. The second argument may be missing, or could be a
+ * symbolic counter. The counter dictates how many internal iterations the
+ * benchmark does. Example:
+ *
+ * BENCHMARK(vectorPushBack) {
+ *   vector<int> v;
+ *   v.push_back(42);
+ * }
+ *
+ * BENCHMARK(insertVectorBegin, n) {
+ *   vector<int> v;
+ *   FOR_EACH_RANGE (i, 0, n) {
+ *     v.insert(v.begin(), 42);
+ *   }
+ * }
+ */
+#define BENCHMARK(name, ...)                                    \
+  BENCHMARK_IMPL(                                               \
+    name,                                                       \
+    FB_STRINGIZE(name),                                         \
+    FB_ARG_2_OR_1(1, ## __VA_ARGS__),                           \
+    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),                   \
+    __VA_ARGS__)
+
+/**
+ * Like BENCHMARK above, but allows the user to return the actual
+ * number of iterations executed in the function body. This can be
+ * useful if the benchmark function doesn't know upfront how many
+ * iterations it's going to run or if it runs through a certain
+ * number of test cases, e.g.:
+ *
+ * BENCHMARK_MULTI(benchmarkSomething) {
+ *   std::vector<int> testCases { 0, 1, 1, 2, 3, 5 };
+ *   for (int c : testCases) {
+ *     doSomething(c);
+ *   }
+ *   return testCases.size();
+ * }
+ */
+#define BENCHMARK_MULTI(name, ...)                              \
+  BENCHMARK_MULTI_IMPL(                                         \
+    name,                                                       \
+    FB_STRINGIZE(name),                                         \
+    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),                   \
+    __VA_ARGS__)
+
+/**
+ * Defines a benchmark that passes a parameter to another one. This is
+ * common for benchmarks that need a "problem size" in addition to
+ * "number of iterations". Consider:
+ *
+ * void pushBack(unsigned n, size_t initialSize) {
+ *   vector<int> v;
+ *   BENCHMARK_SUSPEND {
+ *     v.resize(initialSize);
+ *   }
+ *   FOR_EACH_RANGE (i, 0, n) {
+ *     v.push_back(i);
+ *   }
+ * }
+ * BENCHMARK_PARAM(pushBack, 0)
+ * BENCHMARK_PARAM(pushBack, 1000)
+ * BENCHMARK_PARAM(pushBack, 1000000)
+ *
+ * The benchmark above estimates the speed of push_back at different
+ * initial sizes of the vector. The framework will pass 0, 1000, and
+ * 1000000 for initialSize, and the iteration count for n.
+ */
+#define BENCHMARK_PARAM(name, param)                                    \
+  BENCHMARK_NAMED_PARAM(name, param, param)
+
+/**
+ * Same as BENCHMARK_PARAM, but allows to return the actual number of
+ * iterations that have been run.
+ */
+#define BENCHMARK_PARAM_MULTI(name, param)                              \
+  BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
+
+/*
+ * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
+ * parameter, rather than using the parameter value.
+ *
+ * Useful when the parameter value is not a valid token for string pasting,
+ * or when you want to specify multiple parameter arguments.
+ *
+ * For example:
+ *
+ * void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
+ *   Histogram<int64_t> hist(bucketSize, min, max);
+ *   int64_t num = min;
+ *   FOR_EACH_RANGE (i, 0, n) {
+ *     hist.addValue(num);
+ *     ++num;
+ *     if (num > max) { num = min; }
+ *   }
+ * }
+ *
+ * BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
+ * BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
+ * BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
+ */
+#define BENCHMARK_NAMED_PARAM(name, param_name, ...)                    \
+  BENCHMARK_IMPL(                                                       \
+      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
+      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",              \
+      iters,                                                            \
+      unsigned,                                                         \
+      iters) {                                                          \
+    name(iters, ## __VA_ARGS__);                                        \
+  }
+
+/**
+ * Same as BENCHMARK_NAMED_PARAM, but allows to return the actual number
+ * of iterations that have been run.
+ */
+#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...)              \
+  BENCHMARK_MULTI_IMPL(                                                 \
+      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
+      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",              \
+      unsigned,                                                         \
+      iters) {                                                          \
+    return name(iters, ## __VA_ARGS__);                                 \
+  }
+
+/**
+ * Just like BENCHMARK, but prints the time relative to a
+ * baseline. The baseline is the most recent BENCHMARK() seen in
+ * lexical order. Example:
+ *
+ * // This is the baseline
+ * BENCHMARK(insertVectorBegin, n) {
+ *   vector<int> v;
+ *   FOR_EACH_RANGE (i, 0, n) {
+ *     v.insert(v.begin(), 42);
+ *   }
+ * }
+ *
+ * BENCHMARK_RELATIVE(insertListBegin, n) {
+ *   list<int> s;
+ *   FOR_EACH_RANGE (i, 0, n) {
+ *     s.insert(s.begin(), 42);
+ *   }
+ * }
+ *
+ * Any number of relative benchmark can be associated with a
+ * baseline. Another BENCHMARK() occurrence effectively establishes a
+ * new baseline.
+ */
+#define BENCHMARK_RELATIVE(name, ...)                           \
+  BENCHMARK_IMPL(                                               \
+    name,                                                       \
+    "%" FB_STRINGIZE(name),                                     \
+    FB_ARG_2_OR_1(1, ## __VA_ARGS__),                           \
+    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),                   \
+    __VA_ARGS__)
+
+/**
+ * Same as BENCHMARK_RELATIVE, but allows to return the actual number
+ * of iterations that have been run.
+ */
+#define BENCHMARK_RELATIVE_MULTI(name, ...)                     \
+  BENCHMARK_MULTI_IMPL(                                         \
+    name,                                                       \
+    "%" FB_STRINGIZE(name),                                     \
+    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),                   \
+    __VA_ARGS__)
+
+/**
+ * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
+ */
+#define BENCHMARK_RELATIVE_PARAM(name, param)                           \
+  BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)
+
+/**
+ * Same as BENCHMARK_RELATIVE_PARAM, but allows to return the actual
+ * number of iterations that have been run.
+ */
+#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param)                     \
+  BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)
+
+/**
+ * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
+ */
+#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...)           \
+  BENCHMARK_IMPL(                                                       \
+      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
+      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",          \
+      iters,                                                            \
+      unsigned,                                                         \
+      iters) {                                                          \
+    name(iters, ## __VA_ARGS__);                                        \
+  }
+
+/**
+ * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows to return the
+ * actual number of iterations that have been run.
+ */
+#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...)     \
+  BENCHMARK_MULTI_IMPL(                                                 \
+      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),              \
+      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",          \
+      unsigned,                                                         \
+      iters) {                                                          \
+    return name(iters, ## __VA_ARGS__);                                 \
+  }
+
+/**
+ * Draws a line of dashes.
+ */
+#define BENCHMARK_DRAW_LINE()                                             \
+  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = (             \
+    ::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
+    true);
+
+/**
+ * Allows execution of code that doesn't count toward the benchmark's
+ * time budget. Example:
+ *
+ * BENCHMARK(insertVectorBegin, n) {
+ *   vector<int> v;
+ *   BENCHMARK_SUSPEND {
+ *     v.reserve(n);
+ *   }
+ *   FOR_EACH_RANGE (i, 0, n) {
+ *     v.insert(v.begin(), 42);
+ *   }
+ * }
+ */
+#define BENCHMARK_SUSPEND                               \
+  if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) =   \
+      ::folly::BenchmarkSuspender()) {}                 \
+  else
+
+#endif // FOLLY_BENCHMARK_H_
diff --git a/faux-folly/folly/Bits.h b/faux-folly/folly/Bits.h
new file mode 100644
index 0000000..3fddca9
--- /dev/null
+++ b/faux-folly/folly/Bits.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Various low-level, bit-manipulation routines.
+ *
+ * findFirstSet(x)  [constexpr]
+ *    find first (least significant) bit set in a value of an integral type,
+ *    1-based (like ffs()).  0 = no bits are set (x == 0)
+ *
+ * findLastSet(x)  [constexpr]
+ *    find last (most significant) bit set in a value of an integral type,
+ *    1-based.  0 = no bits are set (x == 0)
+ *    for x != 0, findLastSet(x) == 1 + floor(log2(x))
+ *
+ * nextPowTwo(x)  [constexpr]
+ *    Finds the next power of two >= x.
+ *
+ * isPowTwo(x)  [constexpr]
+ *    return true iff x is a power of two
+ *
+ * popcount(x)
+ *    return the number of 1 bits in x
+ *
+ * Endian
+ *    convert between native, big, and little endian representation
+ *    Endian::big(x)      big <-> native
+ *    Endian::little(x)   little <-> native
+ *    Endian::swap(x)     big <-> little
+ *
+ * BitIterator
+ *    Wrapper around an iterator over an integral type that iterates
+ *    over its underlying bits in LSb to MSb order
+ *
+ * findFirstSet(BitIterator begin, BitIterator end)
+ *    return a BitIterator pointing to the first 1 bit in [begin, end), or
+ *    end if all bits in [begin, end) are 0
+ *
+ * @author Tudor Bosman (tudorb@fb.com)
+ */
+
+#ifndef FOLLY_BITS_H_
+#define FOLLY_BITS_H_
+
+#if !defined(__clang__) && !defined(_MSC_VER)
+#define FOLLY_INTRINSIC_CONSTEXPR constexpr
+#else
+// GCC is the only compiler with intrinsics constexpr.
+#define FOLLY_INTRINSIC_CONSTEXPR const
+#endif
+
+#include <folly/Portability.h>
+
+#include <folly/detail/BitsDetail.h>
+#include <folly/detail/BitIteratorDetail.h>
+#include <folly/Likely.h>
+
+#if FOLLY_HAVE_BYTESWAP_H
+# include <byteswap.h>
+#endif
+
+#ifdef _MSC_VER
+# include <intrin.h>
+# pragma intrinsic(_BitScanForward)
+# pragma intrinsic(_BitScanForward64)
+# pragma intrinsic(_BitScanReverse)
+# pragma intrinsic(_BitScanReverse64)
+#endif
+
+#include <cassert>
+#include <cinttypes>
+#include <iterator>
+#include <limits>
+#include <type_traits>
+#include <boost/iterator/iterator_adaptor.hpp>
+#include <stdint.h>
+
+namespace folly {
+
+// Generate overloads for findFirstSet as wrappers around
+// appropriate ffs, ffsl, ffsll gcc builtins
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) <= sizeof(unsigned int)),
+  unsigned int>::type
+  findFirstSet(T x) {
+#ifdef _MSC_VER
+  unsigned long index;
+  // _BitScanForward yields a 0-based index; findFirstSet is 1-based like ffs()
+  return _BitScanForward(&index, x) ? index + 1 : 0;
+#else
+  return __builtin_ffs(x);
+#endif
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned int) &&
+   sizeof(T) <= sizeof(unsigned long)),
+  unsigned int>::type
+  findFirstSet(T x) {
+#ifdef _MSC_VER
+  unsigned long index;
+  // _BitScanForward yields a 0-based index; findFirstSet is 1-based like ffs()
+  return _BitScanForward(&index, x) ? index + 1 : 0;
+#else
+  return __builtin_ffsl(x);
+#endif
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned long) &&
+   sizeof(T) <= sizeof(unsigned long long)),
+  unsigned int>::type
+  findFirstSet(T x) {
+#ifdef _MSC_VER
+  unsigned long index;
+  // _BitScanForward64 yields a 0-based index; findFirstSet is 1-based like ffs()
+  return _BitScanForward64(&index, x) ? index + 1 : 0;
+#else
+  return __builtin_ffsll(x);
+#endif
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value && std::is_signed<T>::value),
+  unsigned int>::type
+  findFirstSet(T x) {
+  // Note that conversion from a signed type to the corresponding unsigned
+  // type is technically implementation-defined, but will likely work
+  // on any implementation that uses two's complement.
+  return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
+}
+
+// findLastSet: return the 1-based index of the highest bit set
+// for x > 0, findLastSet(x) == 1 + floor(log2(x))
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) <= sizeof(unsigned int)),
+  unsigned int>::type
+  findLastSet(T x) {
+#ifdef _MSC_VER
+  unsigned long index;
+  int clz;
+  if (_BitScanReverse(&index, x)) {
+    clz = static_cast<int>(31 - index);
+  } else {
+    clz = 32;
+  }
+  return x ? 8 * sizeof(unsigned int) - clz : 0;
+#else
+  return x ? 8 * sizeof(unsigned int) - __builtin_clz(x) : 0;
+#endif
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned int) &&
+   sizeof(T) <= sizeof(unsigned long)),
+  unsigned int>::type
+  findLastSet(T x) {
+#ifdef _MSC_VER
+  unsigned long index;
+  int clz;
+  if (_BitScanReverse(&index, x)) {
+    clz = static_cast<int>(31 - index);
+  } else {
+    clz = 32;
+  }
+  return x ? 8 * sizeof(unsigned int) - clz : 0;
+#else
+  return x ? 8 * sizeof(unsigned long) - __builtin_clzl(x) : 0;
+#endif
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned long) &&
+   sizeof(T) <= sizeof(unsigned long long)),
+  unsigned int>::type
+  findLastSet(T x) {
+#ifdef _MSC_VER
+  unsigned long index;
+  unsigned long long clz;
+  if (_BitScanReverse(&index, x)) {
+    clz = static_cast<unsigned long long>(63 - index);
+  } else {
+    clz = 64;
+  }
+  return x ? 8 * sizeof(unsigned long long) - clz : 0;
+#else
+  return x ? 8 * sizeof(unsigned long long) - __builtin_clzll(x) : 0;
+#endif
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_signed<T>::value),
+  unsigned int>::type
+  findLastSet(T x) {
+  return findLastSet(static_cast<typename std::make_unsigned<T>::type>(x));
+}
+
+template <class T>
+inline FOLLY_INTRINSIC_CONSTEXPR
+typename std::enable_if<
+  std::is_integral<T>::value && std::is_unsigned<T>::value,
+  T>::type
+nextPowTwo(T v) {
+  // Shift a 64-bit 1 so this stays well-defined when T is wider than
+  // unsigned long (e.g. uint64_t on 32-bit platforms).
+  return v ? static_cast<T>(uint64_t(1) << findLastSet(v - 1)) : T(1);
+}
+
+template <class T>
+inline constexpr
+typename std::enable_if<
+  std::is_integral<T>::value && std::is_unsigned<T>::value,
+  bool>::type
+isPowTwo(T v) {
+  return (v != 0) && !(v & (v - 1));
+}
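+
+// Illustrative values: findFirstSet(0x8u) == 4, findLastSet(0x8u) == 4,
+// nextPowTwo(5u) == 8, isPowTwo(64u) == true.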
+
+/**
+ * Population count
+ */
+template <class T>
+inline typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) <= sizeof(unsigned int)),
+  size_t>::type
+  popcount(T x) {
+  return __builtin_popcount(x);
+}
+
+template <class T>
+inline typename std::enable_if<
+  (std::is_integral<T>::value &&
+   std::is_unsigned<T>::value &&
+   sizeof(T) > sizeof(unsigned int) &&
+   sizeof(T) <= sizeof(unsigned long long)),
+  size_t>::type
+  popcount(T x) {
+  return __builtin_popcountll(x);
+}
+
+/**
+ * Endianness detection and manipulation primitives.
+ */
+namespace detail {
+
+template <class T>
+struct EndianIntBase {
+ public:
+  static T swap(T x);
+};
+
+#ifndef _MSC_VER
+
+/**
+ * If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide our
+ * own definition.
+ */
+#ifdef bswap_16
+# define our_bswap16 bswap_16
+#else
+
+template<class Int16>
+inline constexpr typename std::enable_if<
+  sizeof(Int16) == 2,
+  Int16>::type
+our_bswap16(Int16 x) {
+  return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
+}
+#endif
+
+#endif
+
+#define FB_GEN(t, fn) \
+template<> inline t EndianIntBase<t>::swap(t x) { return fn(x); }
+
+// fn(x) expands to (x) if the second argument is empty, which is exactly
+// what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
+// __builtin_bswap16 for some reason, so we have to provide our own.
+FB_GEN( int8_t,)
+FB_GEN(uint8_t,)
+#ifdef _MSC_VER
+FB_GEN( int64_t, _byteswap_uint64)
+FB_GEN(uint64_t, _byteswap_uint64)
+FB_GEN( int32_t, _byteswap_ulong)
+FB_GEN(uint32_t, _byteswap_ulong)
+FB_GEN( int16_t, _byteswap_ushort)
+FB_GEN(uint16_t, _byteswap_ushort)
+#else
+FB_GEN( int64_t, __builtin_bswap64)
+FB_GEN(uint64_t, __builtin_bswap64)
+FB_GEN( int32_t, __builtin_bswap32)
+FB_GEN(uint32_t, __builtin_bswap32)
+FB_GEN( int16_t, our_bswap16)
+FB_GEN(uint16_t, our_bswap16)
+#endif
+
+#undef FB_GEN
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+template <class T>
+struct EndianInt : public detail::EndianIntBase<T> {
+ public:
+  static T big(T x) { return EndianInt::swap(x); }
+  static T little(T x) { return x; }
+};
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+template <class T>
+struct EndianInt : public detail::EndianIntBase<T> {
+ public:
+  static T big(T x) { return x; }
+  static T little(T x) { return EndianInt::swap(x); }
+};
+
+#else
+# error Your machine uses a weird endianness!
+#endif  /* __BYTE_ORDER__ */
+
+}  // namespace detail
+
+// big* convert between native and big-endian representations
+// little* convert between native and little-endian representations
+// swap* convert between big-endian and little-endian representations
+//
+// ntohs, htons == big16
+// ntohl, htonl == big32
+#define FB_GEN1(fn, t, sz) \
+  static t fn##sz(t x) { return fn<t>(x); } \
+
+#define FB_GEN2(t, sz) \
+  FB_GEN1(swap, t, sz) \
+  FB_GEN1(big, t, sz) \
+  FB_GEN1(little, t, sz)
+
+#define FB_GEN(sz) \
+  FB_GEN2(uint##sz##_t, sz) \
+  FB_GEN2(int##sz##_t, sz)
+
+class Endian {
+ public:
+  enum class Order : uint8_t {
+    LITTLE,
+    BIG
+  };
+
+  static constexpr Order order =
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+    Order::LITTLE;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+    Order::BIG;
+#else
+# error Your machine uses a weird endianness!
+#endif  /* __BYTE_ORDER__ */
+
+  template <class T> static T swap(T x) {
+    return detail::EndianInt<T>::swap(x);
+  }
+  template <class T> static T big(T x) {
+    return detail::EndianInt<T>::big(x);
+  }
+  template <class T> static T little(T x) {
+    return detail::EndianInt<T>::little(x);
+  }
+
+  FB_GEN(64)
+  FB_GEN(32)
+  FB_GEN(16)
+  FB_GEN(8)
+};
+
+#undef FB_GEN
+#undef FB_GEN2
+#undef FB_GEN1
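+
+// Illustrative behavior on a little-endian machine:
+//   Endian::big32(0x11223344u) == 0x44332211u    // native <-> big swaps
+//   Endian::little32(0x11223344u) == 0x11223344u // native is little: no-op
+//   Endian::swap32(0x11223344u) == 0x44332211u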
+
+/**
+ * Fast bit iteration facility.
+ */
+
+
+template <class BaseIter> class BitIterator;
+template <class BaseIter>
+BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter>,
+                                   BitIterator<BaseIter>);
+/**
+ * Wrapper around an iterator over an integer type that iterates
+ * over its underlying bits in LSb to MSb order.
+ *
+ * BitIterator models the same iterator concepts as the base iterator.
+ */
+template <class BaseIter>
+class BitIterator
+  : public bititerator_detail::BitIteratorBase<BaseIter>::type {
+ public:
+  /**
+   * Return the number of bits in an element of the underlying iterator.
+   */
+  static unsigned int bitsPerBlock() {
+    return std::numeric_limits<
+      typename std::make_unsigned<
+        typename std::iterator_traits<BaseIter>::value_type
+      >::type
+    >::digits;
+  }
+
+  /**
+   * Construct a BitIterator that points at a given bit offset (default 0)
+   * in iter.
+   */
+  #pragma GCC diagnostic push // bitOffset shadows a member
+  #pragma GCC diagnostic ignored "-Wshadow"
+  explicit BitIterator(const BaseIter& iter, size_t bitOffset=0)
+    : bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
+      bitOffset_(bitOffset) {
+    assert(bitOffset_ < bitsPerBlock());
+  }
+  #pragma GCC diagnostic pop
+
+  size_t bitOffset() const {
+    return bitOffset_;
+  }
+
+  void advanceToNextBlock() {
+    bitOffset_ = 0;
+    ++this->base_reference();
+  }
+
+  BitIterator& operator=(const BaseIter& other) {
+    this->~BitIterator();
+    new (this) BitIterator(other);
+    return *this;
+  }
+
+ private:
+  friend class boost::iterator_core_access;
+  friend BitIterator findFirstSet<>(BitIterator, BitIterator);
+
+  typedef bititerator_detail::BitReference<
+      typename std::iterator_traits<BaseIter>::reference,
+      typename std::iterator_traits<BaseIter>::value_type
+    > BitRef;
+
+  void advanceInBlock(size_t n) {
+    bitOffset_ += n;
+    assert(bitOffset_ < bitsPerBlock());
+  }
+
+  BitRef dereference() const {
+    return BitRef(*this->base_reference(), bitOffset_);
+  }
+
+  void advance(ssize_t n) {
+    size_t bpb = bitsPerBlock();
+    ssize_t blocks = n / bpb;
+    bitOffset_ += n % bpb;
+    if (bitOffset_ >= bpb) {
+      bitOffset_ -= bpb;
+      ++blocks;
+    }
+    this->base_reference() += blocks;
+  }
+
+  void increment() {
+    if (++bitOffset_ == bitsPerBlock()) {
+      advanceToNextBlock();
+    }
+  }
+
+  void decrement() {
+    if (bitOffset_-- == 0) {
+      bitOffset_ = bitsPerBlock() - 1;
+      --this->base_reference();
+    }
+  }
+
+  bool equal(const BitIterator& other) const {
+    return (bitOffset_ == other.bitOffset_ &&
+            this->base_reference() == other.base_reference());
+  }
+
+  ssize_t distance_to(const BitIterator& other) const {
+    return
+      (other.base_reference() - this->base_reference()) * bitsPerBlock() +
+      other.bitOffset_ - bitOffset_;
+  }
+
+  unsigned int bitOffset_;
+};
+
+/**
+ * Helper function, so you can write
+ * auto bi = makeBitIterator(container.begin());
+ */
+template <class BaseIter>
+BitIterator<BaseIter> makeBitIterator(const BaseIter& iter) {
+  return BitIterator<BaseIter>(iter);
+}
+
+
+/**
+ * Find first bit set in a range of bit iterators.
+ * 4.5x faster than the obvious std::find(begin, end, true);
+ */
+template <class BaseIter>
+BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter> begin,
+                                   BitIterator<BaseIter> end) {
+  // shortcut to avoid ugly static_cast<>
+  static const typename BaseIter::value_type one = 1;
+
+  while (begin.base() != end.base()) {
+    typename BaseIter::value_type v = *begin.base();
+    // mask out the bits that don't matter (< begin.bitOffset)
+    v &= ~((one << begin.bitOffset()) - 1);
+    size_t firstSet = findFirstSet(v);
+    if (firstSet) {
+      --firstSet;  // now it's 0-based
+      assert(firstSet >= begin.bitOffset());
+      begin.advanceInBlock(firstSet - begin.bitOffset());
+      return begin;
+    }
+    begin.advanceToNextBlock();
+  }
+
+  // now begin points to the same block as end
+  if (end.bitOffset() != 0) {  // assume end is dereferenceable
+    typename BaseIter::value_type v = *begin.base();
+    // mask out the bits that don't matter (< begin.bitOffset)
+    v &= ~((one << begin.bitOffset()) - 1);
+    // mask out the bits that don't matter (>= end.bitOffset)
+    v &= (one << end.bitOffset()) - 1;
+    size_t firstSet = findFirstSet(v);
+    if (firstSet) {
+      --firstSet;  // now it's 0-based
+      assert(firstSet >= begin.bitOffset());
+      begin.advanceInBlock(firstSet - begin.bitOffset());
+      return begin;
+    }
+  }
+
+  return end;
+}
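+
+// Usage sketch (illustrative): scan a vector of words for the first set bit:
+//
+//   std::vector<uint64_t> v = {0, 0x10};
+//   auto b = folly::makeBitIterator(v.begin());
+//   auto e = folly::makeBitIterator(v.end());
+//   auto it = folly::findFirstSet(b, e);
+//   // it points into the second word with it.bitOffset() == 4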
+
+
+template <class T, class Enable=void> struct Unaligned;
+
+/**
+ * Representation of an unaligned value of a POD type.
+ */
+FOLLY_PACK_PUSH
+template <class T>
+struct Unaligned<
+    T,
+    typename std::enable_if<std::is_pod<T>::value>::type> {
+  Unaligned() = default;  // uninitialized
+  /* implicit */ Unaligned(T v) : value(v) { }
+  T value;
+} FOLLY_PACK_ATTR;
+FOLLY_PACK_POP
+
+/**
+ * Read an unaligned value of type T and return it.
+ */
+template <class T>
+inline T loadUnaligned(const void* p) {
+  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
+  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
+  return static_cast<const Unaligned<T>*>(p)->value;
+}
+
+/**
+ * Write an unaligned value of type T.
+ */
+template <class T>
+inline void storeUnaligned(void* p, T value) {
+  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
+  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
+  new (p) Unaligned<T>(value);
+}
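+
+// Usage sketch (illustrative): access a 32-bit value at an odd offset
+// without invoking unaligned-access undefined behavior:
+//
+//   char buf[16] = {};
+//   folly::storeUnaligned<uint32_t>(buf + 1, 0xdeadbeef);
+//   uint32_t x = folly::loadUnaligned<uint32_t>(buf + 1);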
+
+}  // namespace folly
+
+#endif /* FOLLY_BITS_H_ */
diff --git a/faux-folly/folly/CPortability.h b/faux-folly/folly/CPortability.h
new file mode 100644
index 0000000..65ffa6f
--- /dev/null
+++ b/faux-folly/folly/CPortability.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CPORTABILITY_H
+#define CPORTABILITY_H
+
+/* These definitions are in a separate file so that they
+ * may be included from C- as well as C++-based projects. */
+
+/* Define a convenience macro to test when address sanitizer is being used
+ * across the different compilers (e.g. clang, gcc) */
+#if defined(__clang__)
+# if __has_feature(address_sanitizer)
+#  define FOLLY_SANITIZE_ADDRESS 1
+# endif
+#elif defined (__GNUC__) && \
+      (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ >= 5)) && \
+      __SANITIZE_ADDRESS__
+# define FOLLY_SANITIZE_ADDRESS 1
+#endif
+
+/* Define attribute wrapper for function attribute used to disable
+ * address sanitizer instrumentation. Unfortunately, this attribute
+ * has issues when inlining is used, so disable that as well. */
+#ifdef FOLLY_SANITIZE_ADDRESS
+# if defined(__clang__)
+#  if __has_attribute(__no_address_safety_analysis__)
+#   define FOLLY_DISABLE_ADDRESS_SANITIZER \
+      __attribute__((__no_address_safety_analysis__, __noinline__))
+#  elif __has_attribute(__no_sanitize_address__)
+#   define FOLLY_DISABLE_ADDRESS_SANITIZER \
+      __attribute__((__no_sanitize_address__, __noinline__))
+#  endif
+# elif defined(__GNUC__)
+#  define FOLLY_DISABLE_ADDRESS_SANITIZER \
+     __attribute__((__no_address_safety_analysis__, __noinline__))
+# endif
+#endif
+#ifndef FOLLY_DISABLE_ADDRESS_SANITIZER
+# define FOLLY_DISABLE_ADDRESS_SANITIZER
+#endif
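+
+/* Usage sketch (illustrative): annotate a function whose deliberate
+ * out-of-bounds reads (e.g. word-at-a-time scanning) would otherwise
+ * trip the address sanitizer:
+ *
+ *   FOLLY_DISABLE_ADDRESS_SANITIZER
+ *   size_t scanWordAtATime(const char* p);
+ */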
+
+#endif
diff --git a/faux-folly/folly/Cancellation.h b/faux-folly/folly/Cancellation.h
new file mode 100644
index 0000000..3c9454a
--- /dev/null
+++ b/faux-folly/folly/Cancellation.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2015 Nest Labs, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cassert>
+#include <mutex>
+#include <condition_variable>
+#include <memory>
+
+#ifdef __GNUC__
+#define FOLLY_CANCELLATION_WARN_UNUSED __attribute__((warn_unused_result))
+#else
+#define FOLLY_CANCELLATION_WARN_UNUSED
+#endif
+
+#include <folly/detail/CancellationDetail.h>
+
+namespace folly {
+
+/**
+ * RAII object to prevent a Cancellation state from changing
+ *
+ * CancellationStateLock shall not be re-lockable. This is because
+ * after you've unlocked it, it is possible that the underlying
+ * cancellation was cancelled. There isn't a reliable way to represent
+ * this other than throwing an exception.
+ *
+ * This object is convertible to bool. If it converts to `true', the
+ * Cancellation IS NOT cancelled and you may proceed as normal. If it
+ * converts to `false', the Cancellation has been CANCELLED.
+ *
+ * <code>
+ *     auto lock = cancellation.hold_state(); // an instance of Cancellation
+ *     if (lock) {                         // i.e. static_cast<bool>(lock)
+ *         // NOT cancelled
+ *     } else {
+ *         // CANCELLED
+ *     }
+ * </code>
+ *
+ * This is implemented using the "try_lock()" idiom of
+ * std::unique_lock<T>.  Thus operator bool() is implemented to return
+ * 'true' if the lock is acquired, and 'false' if not. In the case of
+ * a Cancellation object, returning `true' means that we've acquired a
+ * hold on the "un-cancelled" state. `false' means that the
+ * Cancellation has been cancelled.
+ */
+class CancellationStateLock : public std::unique_lock<detail::CancellationSharedState>
+{
+public:
+    friend class Cancellation;
+
+    using base_type = std::unique_lock<detail::CancellationSharedState>;
+
+    /* Should only be constructed in Cancellation::hold_state() */
+    CancellationStateLock() = delete;
+
+    ~CancellationStateLock() = default;
+
+    /* non-copyable */
+    CancellationStateLock(const CancellationStateLock& o) = delete;
+    CancellationStateLock& operator= (const CancellationStateLock& o) = delete;
+
+    /**
+     * move constructor
+     */
+    CancellationStateLock(CancellationStateLock&& o) :
+        base_type(std::move(o))
+    {}
+
+    /**
+     * move operator
+     */
+    CancellationStateLock& operator= (CancellationStateLock&& o)
+    {
+        base_type::operator=(std::move(o));
+        return *this;
+    }
+
+private:
+    /**
+     * Construct a lock that refers to a specific CancellationSharedState
+     *
+     */
+    CancellationStateLock(detail::CancellationSharedState& ss, std::try_to_lock_t tag)
+        : base_type(ss, tag)
+    {
+    }
+
+    /**
+     * This is made private to prevent its use (i.e. for re-lock()).
+     */
+    void lock();
+};
+
+/**
+ * A thread-safe Cancellation object
+ *
+ * This is a token that is used to indicate whether or not some
+ * operation or object is cancelled, deleted, out of date,
+ * whatever. Like a mutex, this is an advisory token, and therefore it
+ * is up to the programmer to use it properly.
+ *
+ * It is implemented so that all objects refer to a shared state (much
+ * like a shared_ptr). Thus, all copies of the object refer to the
+ * same shared state object. Therefore a Cancellation token can be
+ * freely copied (by value), retaining a reference to the same shared
+ * state as the copied-from token.
+ *
+ * It has a very simple state machine:
+ *
+ * <code>
+ *                               cancel()
+ * start --> [NOT CANCELLED] ----------------> [CANCELLED]
+ * </code>
+ *
+ * The CANCELLED state is terminal.
+ */
+class Cancellation
+{
+public:
+    /**
+     * Construct a new cancellation object.
+     */
+    Cancellation() :
+        _d(new detail::CancellationSharedState)
+    {}
+
+    /**
+     * Copy constructor
+     *
+     * The new Cancellation will refer to the same shared state as `o'.
+     *
+     * @param o an existing Cancellation object from whom we get our
+     * shared state.
+     */
+    Cancellation(const Cancellation& o) :
+        _d(o._d)
+    {}
+
+    /**
+     * Assignment operator
+     *
+     * On return, the Cancellation will refer to the same shared state as `o'.
+     * We will drop the reference to the `current' shared state.
+     *
+     * @param o an existing Cancellation object from whom we get our
+     * shared state.
+     **/
+    Cancellation& operator=(const Cancellation& o)
+    {
+        _d = o._d;
+        return *this;
+    }
+
+    /**
+     * Move constructor
+     *
+     * The new Cancellation will refer to the same shared state as `o'.
+     *
+     * @param o an existing Cancellation object from whom we get our
+     * shared state.
+     */
+    Cancellation(Cancellation&& o) :
+        _d(std::move(o._d))
+    {}
+
+    /**
+     * Move operator
+     *
+     * On return, the new Cancellation will refer to the same shared
+     * state as `o'. We will drop the reference to the `current' shared state.
+     *
+     * @param o an existing Cancellation object from whom we get our
+     * shared state.
+     */
+    Cancellation& operator=(Cancellation&& o)
+    {
+        _d = std::move(o._d);
+        return *this;
+    }
+
+    /**
+     * Returns the current cancellation state of the object.
+     *
+     * It is generally not recommended that you use this function
+     * because it is not thread-safe. Use hold_state(), instead.
+     *
+     * Once an object is Cancelled, this function returns `true'. Once
+     * that happens, this function will never again return `false'.
+     *
+     * However, if this function returns `false' -- there's a
+     * possibility that the object is actually Cancelled. Therefore
+     * you should never rely on this value.
+     *
+     * @return true if the object has been cancelled. false if the
+     * object has NOT been cancelled.
+     */
+    bool is_cancelled() const
+    {
+        return _d->is_cancelled();
+    }
+
+    /**
+     * Set the state of the object to "cancelled"
+     *
+     * If the object is already cancelled, this function returns
+     * immediately.
+     *
+     * If not already cancelled, this function may block until all
+     * state holds are released.
+     */
+    void cancel()
+    {
+        _d->cancel();
+    }
+
+    /**
+     * Acquire a hold on the state of the object
+     *
+     * If the object is not cancelled, the returned object will
+     * prevent cancellation until it is deleted or released
+     * (via unlock()).
+     *
+     * If the object is cancelled, it returns immediately.
+     *
+     * You MUST check the validity of the returned lock. When cast to
+     * bool, if it returns false then the object is cancelled.
+     *
+     * @return a CancellationStateLock object that prevents the state
+     * of the object from changing.
+     */
+    FOLLY_CANCELLATION_WARN_UNUSED
+    CancellationStateLock hold_state() const {
+        CancellationStateLock lock(*_d, std::try_to_lock);
+        return lock;
+    }
+
+private:
+    std::shared_ptr<detail::CancellationSharedState> _d;
+};
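+
+// Usage sketch (illustrative; doWork() is hypothetical): a worker takes
+// a hold on the state before touching shared resources, while another
+// thread may call cancel() at any time; cancel() blocks until all holds
+// are released:
+//
+//   void workerStep(folly::Cancellation token) {  // copies share state
+//       auto lock = token.hold_state();
+//       if (!lock) {
+//           return;  // already cancelled
+//       }
+//       doWork();    // safe: cancellation is held off until `lock' dies
+//   }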
+
+} /* namespace folly */
diff --git a/faux-folly/folly/ContainerTraits.h b/faux-folly/folly/ContainerTraits.h
new file mode 100644
index 0000000..5f6e5d4
--- /dev/null
+++ b/faux-folly/folly/ContainerTraits.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_BASE_CONTAINER_TRAITS_H_
+#define FOLLY_BASE_CONTAINER_TRAITS_H_
+
+#include <folly/Traits.h>
+
+namespace folly {
+
+FOLLY_CREATE_HAS_MEMBER_FN_TRAITS(container_emplace_back_traits, emplace_back);
+
+template <class Container, typename... Args>
+inline
+typename std::enable_if<
+    container_emplace_back_traits<Container, void(Args...)>::value>::type
+container_emplace_back_or_push_back(Container& container, Args&&... args) {
+  container.emplace_back(std::forward<Args>(args)...);
+}
+
+template <class Container, typename... Args>
+inline
+typename std::enable_if<
+    !container_emplace_back_traits<Container, void(Args...)>::value>::type
+container_emplace_back_or_push_back(Container& container, Args&&... args) {
+  using v = typename Container::value_type;
+  container.push_back(v(std::forward<Args>(args)...));
+}
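+
+// Usage sketch (illustrative):
+//
+//   std::vector<std::pair<int, int>> v;
+//   folly::container_emplace_back_or_push_back(v, 1, 2);
+//   // uses emplace_back when available, push_back otherwise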
+
+}
+
+#endif
diff --git a/faux-folly/folly/Conv.cpp b/faux-folly/folly/Conv.cpp
new file mode 100644
index 0000000..0ae9b22
--- /dev/null
+++ b/faux-folly/folly/Conv.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define FOLLY_CONV_INTERNAL
+#include <folly/Conv.h>
+
+namespace folly {
+namespace detail {
+
+extern const char digit1[101] =
+  "00000000001111111111222222222233333333334444444444"
+  "55555555556666666666777777777788888888889999999999";
+extern const char digit2[101] =
+  "01234567890123456789012345678901234567890123456789"
+  "01234567890123456789012345678901234567890123456789";
+
+template <> const char *const MaxString<bool>::value = "true";
+template <> const char *const MaxString<uint8_t>::value = "255";
+template <> const char *const MaxString<uint16_t>::value = "65535";
+template <> const char *const MaxString<uint32_t>::value = "4294967295";
+#if __SIZEOF_LONG__ == 4
+template <> const char *const MaxString<unsigned long>::value =
+  "4294967295";
+#else
+template <> const char *const MaxString<unsigned long>::value =
+  "18446744073709551615";
+#endif
+static_assert(sizeof(unsigned long) >= 4,
+              "Wrong value for MaxString<unsigned long>::value,"
+              " please update.");
+template <> const char *const MaxString<unsigned long long>::value =
+  "18446744073709551615";
+static_assert(sizeof(unsigned long long) >= 8,
+              "Wrong value for MaxString<unsigned long long>::value"
+              ", please update.");
+
+#ifdef FOLLY_HAVE_INT128_T
+template <> const char *const MaxString<__uint128_t>::value =
+  "340282366920938463463374607431768211455";
+#endif
+
+inline bool bool_str_cmp(const char** b, size_t len, const char* value) {
+  // Can't use strncasecmp, since we want to ensure that the full value matches
+  const char* p = *b;
+  const char* e = *b + len;
+  const char* v = value;
+  while (*v != '\0') {
+    if (p == e || tolower(*p) != *v) { // value is already lowercase
+      return false;
+    }
+    ++p;
+    ++v;
+  }
+
+  *b = p;
+  return true;
+}
+
+bool str_to_bool(StringPiece* src) {
+  auto b = src->begin(), e = src->end();
+  for (;; ++b) {
+    FOLLY_RANGE_CHECK_STRINGPIECE(
+      b < e, "No non-whitespace characters found in input string", *src);
+    if (!isspace(*b)) break;
+  }
+
+  bool result;
+  size_t len = e - b;
+  switch (*b) {
+    case '0':
+    case '1': {
+      result = false;
+      for (; b < e && isdigit(*b); ++b) {
+        FOLLY_RANGE_CHECK_STRINGPIECE(
+          !result && (*b == '0' || *b == '1'),
+          "Integer overflow when parsing bool: must be 0 or 1", *src);
+        result = (*b == '1');
+      }
+      break;
+    }
+    case 'y':
+    case 'Y':
+      result = true;
+      if (!bool_str_cmp(&b, len, "yes")) {
+        ++b;  // accept the single 'y' character
+      }
+      break;
+    case 'n':
+    case 'N':
+      result = false;
+      if (!bool_str_cmp(&b, len, "no")) {
+        ++b;
+      }
+      break;
+    case 't':
+    case 'T':
+      result = true;
+      if (!bool_str_cmp(&b, len, "true")) {
+        ++b;
+      }
+      break;
+    case 'f':
+    case 'F':
+      result = false;
+      if (!bool_str_cmp(&b, len, "false")) {
+        ++b;
+      }
+      break;
+    case 'o':
+    case 'O':
+      if (bool_str_cmp(&b, len, "on")) {
+        result = true;
+      } else if (bool_str_cmp(&b, len, "off")) {
+        result = false;
+      } else {
+        FOLLY_RANGE_CHECK_STRINGPIECE(false, "Invalid value for bool", *src);
+      }
+      break;
+    default:
+      FOLLY_RANGE_CHECK_STRINGPIECE(false, "Invalid value for bool", *src);
+  }
+
+  src->assign(b, e);
+  return result;
+}
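+
+// Accepted spellings (illustrative summary, derived from the cases above):
+//   "0"/"1" (optionally with leading zeros), "y"/"yes", "n"/"no",
+//   "true"/"false", "on"/"off", each matched case-insensitively.
+// Leading whitespace is skipped, and *src is advanced past the parsed token.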
+
+} // namespace detail
+} // namespace folly
diff --git a/faux-folly/folly/Conv.h b/faux-folly/folly/Conv.h
new file mode 100644
index 0000000..74cdf51
--- /dev/null
+++ b/faux-folly/folly/Conv.h
@@ -0,0 +1,1565 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Converts anything to anything, with an emphasis on performance and
+ * safety.
+ *
+ * @author Andrei Alexandrescu (andrei.alexandrescu@fb.com)
+ */
+
+#ifndef FOLLY_BASE_CONV_H_
+#define FOLLY_BASE_CONV_H_
+
+#include <folly/FBString.h>
+#include <folly/Likely.h>
+#include <folly/Preprocessor.h>
+#include <folly/Range.h>
+
+#include <boost/implicit_cast.hpp>
+#include <algorithm>
+#include <type_traits>
+#include <limits>
+#include <string>
+#include <tuple>
+#include <stdexcept>
+#include <typeinfo>
+
+#include <limits.h>
+
+// V8 JavaScript implementation
+#include <double-conversion/double-conversion.h>
+
+#define FOLLY_RANGE_CHECK_STRINGIZE(x) #x
+#define FOLLY_RANGE_CHECK_STRINGIZE2(x) FOLLY_RANGE_CHECK_STRINGIZE(x)
+
+// Android doesn't support std::to_string so just use a placeholder there.
+#ifdef __ANDROID__
+#define FOLLY_RANGE_CHECK_TO_STRING(x) std::string("N/A")
+#else
+#define FOLLY_RANGE_CHECK_TO_STRING(x) std::to_string(x)
+#endif
+
+#define FOLLY_RANGE_CHECK(condition, message, src)                          \
+  ((condition) ? (void)0 : throw std::range_error(                          \
+    (std::string(__FILE__ "(" FOLLY_RANGE_CHECK_STRINGIZE2(__LINE__) "): ") \
+     + (message) + ": '" + (src) + "'").c_str()))
+
+#define FOLLY_RANGE_CHECK_BEGIN_END(condition, message, b, e)    \
+  FOLLY_RANGE_CHECK(condition, message, std::string((b), (e) - (b)))
+
+#define FOLLY_RANGE_CHECK_STRINGPIECE(condition, message, sp)    \
+  FOLLY_RANGE_CHECK(condition, message, std::string((sp).data(), (sp).size()))
+
+namespace folly {
+
+/**
+ * The identity conversion function.
+ * to<T>(T) returns itself for all types T.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<std::is_same<Tgt, Src>::value, Tgt>::type
+to(const Src & value) {
+  return value;
+}
+
+template <class Tgt, class Src>
+typename std::enable_if<std::is_same<Tgt, Src>::value, Tgt>::type
+to(Src && value) {
+  return std::move(value);
+}
+
+/*******************************************************************************
+ * Integral to integral
+ ******************************************************************************/
+
+/**
+ * Checked conversion from integral to integral. The checks are only
+ * performed when meaningful, e.g. conversion from int to long goes
+ * unchecked.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value
+  && std::is_integral<Tgt>::value
+  && !std::is_same<Tgt, Src>::value,
+  Tgt>::type
+to(const Src & value) {
+  /* static */ if (std::numeric_limits<Tgt>::max()
+                   < std::numeric_limits<Src>::max()) {
+    FOLLY_RANGE_CHECK(
+      (!greater_than<Tgt, std::numeric_limits<Tgt>::max()>(value)),
+      "Overflow",
+      FOLLY_RANGE_CHECK_TO_STRING(value));
+  }
+  /* static */ if (std::is_signed<Src>::value &&
+                   (!std::is_signed<Tgt>::value || sizeof(Src) > sizeof(Tgt))) {
+    FOLLY_RANGE_CHECK(
+      (!less_than<Tgt, std::numeric_limits<Tgt>::min()>(value)),
+      "Negative overflow",
+      FOLLY_RANGE_CHECK_TO_STRING(value));
+  }
+  return static_cast<Tgt>(value);
+}
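+
+// Illustrative behavior sketch (follows from the checks above):
+//
+//   auto narrow = folly::to<int32_t>(int64_t(1) << 40); // throws std::range_error
+//   auto widen  = folly::to<int64_t>(42);               // unchecked widening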
+
+/*******************************************************************************
+ * Floating point to floating point
+ ******************************************************************************/
+
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_floating_point<Tgt>::value
+  && std::is_floating_point<Src>::value
+  && !std::is_same<Tgt, Src>::value,
+  Tgt>::type
+to(const Src & value) {
+  /* static */ if (std::numeric_limits<Tgt>::max() <
+                   std::numeric_limits<Src>::max()) {
+    FOLLY_RANGE_CHECK(value <= std::numeric_limits<Tgt>::max(),
+                      "Overflow",
+                      FOLLY_RANGE_CHECK_TO_STRING(value));
+    FOLLY_RANGE_CHECK(value >= -std::numeric_limits<Tgt>::max(),
+                      "Negative overflow",
+                      FOLLY_RANGE_CHECK_TO_STRING(value));
+  }
+  return boost::implicit_cast<Tgt>(value);
+}
+
+/*******************************************************************************
+ * Anything to string
+ ******************************************************************************/
+
+namespace detail {
+
+template <class T>
+const T& getLastElement(const T & v) {
+  return v;
+}
+
+template <class T, class... Ts>
+typename std::tuple_element<
+  sizeof...(Ts),
+  std::tuple<T, Ts...> >::type const&
+  getLastElement(const T&, const Ts&... vs) {
+  return getLastElement(vs...);
+}
+
+// This class exists to specialize away std::tuple_element in the case where we
+// have 0 template arguments. Without this, Clang/libc++ will blow a
+// static_assert even if tuple_element is protected by an enable_if.
+template <class... Ts>
+struct last_element {
+  typedef typename std::enable_if<
+    sizeof...(Ts) >= 1,
+    typename std::tuple_element<
+      sizeof...(Ts) - 1, std::tuple<Ts...>
+    >::type>::type type;
+};
+
+template <>
+struct last_element<> {
+  typedef void type;
+};
+
+} // namespace detail
+
+/*******************************************************************************
+ * Conversions from integral types to string types.
+ ******************************************************************************/
+
+#if FOLLY_HAVE_INT128_T
+namespace detail {
+
+template <typename IntegerType>
+constexpr unsigned int
+digitsEnough() {
+  return ceil((double(sizeof(IntegerType) * CHAR_BIT) * M_LN2) / M_LN10);
+}
+
+inline size_t
+unsafeTelescope128(char * buffer, size_t room, unsigned __int128 x) {
+  typedef unsigned __int128 Usrc;
+  size_t p = room - 1;
+
+  while (x >= (Usrc(1) << 64)) { // Using 128-bit division while needed
+    const auto y = x / 10;
+    const auto digit = x % 10;
+
+    buffer[p--] = '0' + digit;
+    x = y;
+  }
+
+  uint64_t xx = x; // Moving to faster 64-bit division thereafter
+
+  while (xx >= 10) {
+    const auto y = xx / 10ULL;
+    const auto digit = xx % 10ULL;
+
+    buffer[p--] = '0' + digit;
+    xx = y;
+  }
+
+  buffer[p] = '0' + xx;
+
+  return p;
+}
+
+}
+#endif
+
+/**
+ * Returns the number of digits in the base 10 representation of an
+ * uint64_t. Useful for preallocating buffers and such. It's also used
+ * internally, see below. Measurements suggest that defining a
+ * separate overload for 32-bit integers is not worthwhile.
+ */
+
+inline uint32_t digits10(uint64_t v) {
+#ifdef __x86_64__
+
+  // For this arch we can get a little help from specialized CPU instructions
+  // which can count leading zeroes; 64 minus that is appx. log (base 2).
+  // Use that to approximate base-10 digits (log_10) and then adjust if needed.
+
+  // 10^i, defined for i 0 through 19.
+  // This is 20 * 8 == 160 bytes, which fits into 3 cache lines
+  // (assuming a cache line size of 64).
+  static const uint64_t powersOf10[20] FOLLY_ALIGNED(64) = {
+    1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
+    10000000000, 100000000000, 1000000000000, 10000000000000, 100000000000000,
+    1000000000000000, 10000000000000000, 100000000000000000,
+    1000000000000000000, 10000000000000000000UL
+  };
+
+  // "count leading zeroes" operation not valid; for 0; special case this.
+  if UNLIKELY (! v) {
+    return 1;
+  }
+
+  // bits is in the ballpark of log_2(v).
+  const uint8_t leadingZeroes = __builtin_clzll(v);
+  const auto bits = 63 - leadingZeroes;
+
+  // approximate log_10(v) == log_10(2) * bits.
+  // Integer magic below: 77/256 is appx. 0.3010 (log_10(2)).
+  // The +1 is to make this the ceiling of the log_10 estimate.
+  const uint32_t minLength = 1 + ((bits * 77) >> 8);
+
+  // return that log_10 lower bound, plus adjust if input >= 10^(that bound)
+  // in case there's a small error and we misjudged length.
+  return minLength + (uint32_t) (UNLIKELY (v >= powersOf10[minLength]));
+
+#else
+
+  uint32_t result = 1;
+  for (;;) {
+    if (LIKELY(v < 10)) return result;
+    if (LIKELY(v < 100)) return result + 1;
+    if (LIKELY(v < 1000)) return result + 2;
+    if (LIKELY(v < 10000)) return result + 3;
+    // Skip ahead by 4 orders of magnitude
+    v /= 10000U;
+    result += 4;
+  }
+
+#endif
+}
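+
+// Illustrative values (these follow directly from the definition):
+//   digits10(0) == 1, digits10(9) == 1, digits10(10) == 2,
+//   digits10(UINT64_MAX) == 20.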
+
+/**
+ * Copies the ASCII base 10 representation of v into buffer and
+ * returns the number of bytes written. Does NOT append a \0. Assumes
+ * the buffer points to digits10(v) bytes of valid memory. Note that
+ * uint64_t needs at most 20 bytes, uint32_t needs at most 10 bytes,
+ * uint16_t needs at most 5 bytes, and so on. Measurements suggest
+ * that defining a separate overload for 32-bit integers is not
+ * worthwhile.
+ *
+ * This primitive is unsafe because it makes the size assumption and
+ * because it does not add a terminating \0.
+ */
+
+inline uint32_t uint64ToBufferUnsafe(uint64_t v, char *const buffer) {
+  auto const result = digits10(v);
+  // WARNING: using size_t or pointer arithmetic for pos slows down
+  // the loop below 20x. This is because several 32-bit ops can be
+  // done in parallel, but fewer 64-bit ones.
+  uint32_t pos = result - 1;
+  while (v >= 10) {
+    // Keep these together so a peephole optimization "sees" them and
+    // computes them in one shot.
+    auto const q = v / 10;
+    auto const r = static_cast<uint32_t>(v % 10);
+    buffer[pos--] = '0' + r;
+    v = q;
+  }
+  // Last digit is trivial to handle
+  buffer[pos] = static_cast<uint32_t>(v) + '0';
+  return result;
+}
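+
+// Illustrative usage sketch (respecting the size contract above; note the
+// output is NOT null-terminated):
+//
+//   char buf[20];                                  // enough for any uint64_t
+//   uint32_t n = uint64ToBufferUnsafe(12345, buf); // n == 5, buf holds "12345"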
+
+/**
+ * A single char gets appended.
+ */
+template <class Tgt>
+void toAppend(char value, Tgt * result) {
+  *result += value;
+}
+
+template<class T>
+constexpr typename std::enable_if<
+  std::is_same<T, char>::value,
+  size_t>::type
+estimateSpaceNeeded(T) {
+  return 1;
+}
+
+/**
+ * Ubiquitous helper template for writing string appenders
+ */
+template <class T> struct IsSomeString {
+  enum { value = std::is_same<T, std::string>::value
+         || std::is_same<T, fbstring>::value };
+};
+
+/**
+ * Everything implicitly convertible to const char* gets appended.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_convertible<Src, const char*>::value
+  && IsSomeString<Tgt>::value>::type
+toAppend(Src value, Tgt * result) {
+  // Treat null pointers like an empty string, as in:
+  // operator<<(std::ostream&, const char*).
+  const char* c = value;
+  if (c) {
+    result->append(value);
+  }
+}
+
+template<class Src>
+typename std::enable_if<
+  std::is_convertible<Src, const char*>::value,
+  size_t>::type
+estimateSpaceNeeded(Src value) {
+  const char *c = value;
+  if (c) {
+    return folly::StringPiece(value).size();
+  };
+  return 0;
+}
+
+template<class Src>
+typename std::enable_if<
+  (std::is_convertible<Src, folly::StringPiece>::value ||
+  IsSomeString<Src>::value) &&
+  !std::is_convertible<Src, const char*>::value,
+  size_t>::type
+estimateSpaceNeeded(Src value) {
+  return folly::StringPiece(value).size();
+}
+
+template<class Src>
+typename std::enable_if<
+  std::is_pointer<Src>::value &&
+  IsSomeString<typename std::remove_pointer<Src>::type>::value,
+  size_t>::type
+estimateSpaceNeeded(Src value) {
+  return value->size();
+}
+
+/**
+ * Strings get appended, too.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  IsSomeString<Src>::value && IsSomeString<Tgt>::value>::type
+toAppend(const Src& value, Tgt * result) {
+  result->append(value);
+}
+
+/**
+ * and StringPiece objects too
+ */
+template <class Tgt>
+typename std::enable_if<
+   IsSomeString<Tgt>::value>::type
+toAppend(StringPiece value, Tgt * result) {
+  result->append(value.data(), value.size());
+}
+
+/**
+ * There's no implicit conversion from fbstring to other string types,
+ * so make a specialization.
+ */
+template <class Tgt>
+typename std::enable_if<
+   IsSomeString<Tgt>::value>::type
+toAppend(const fbstring& value, Tgt * result) {
+  result->append(value.data(), value.size());
+}
+
+#if FOLLY_HAVE_INT128_T
+/**
+ * Special handling for 128 bit integers.
+ */
+
+template <class Tgt>
+void
+toAppend(__int128 value, Tgt * result) {
+  typedef unsigned __int128 Usrc;
+  char buffer[detail::digitsEnough<unsigned __int128>() + 1];
+  size_t p;
+
+  if (value < 0) {
+    p = detail::unsafeTelescope128(buffer, sizeof(buffer), Usrc(-value));
+    buffer[--p] = '-';
+  } else {
+    p = detail::unsafeTelescope128(buffer, sizeof(buffer), value);
+  }
+
+  result->append(buffer + p, buffer + sizeof(buffer));
+}
+
+template <class Tgt>
+void
+toAppend(unsigned __int128 value, Tgt * result) {
+  char buffer[detail::digitsEnough<unsigned __int128>()];
+  size_t p;
+
+  p = detail::unsafeTelescope128(buffer, sizeof(buffer), value);
+
+  result->append(buffer + p, buffer + sizeof(buffer));
+}
+
+template<class T>
+constexpr typename std::enable_if<
+  std::is_same<T, __int128>::value,
+  size_t>::type
+estimateSpaceNeeded(T) {
+  return detail::digitsEnough<__int128>();
+}
+
+template<class T>
+constexpr typename std::enable_if<
+  std::is_same<T, unsigned __int128>::value,
+  size_t>::type
+estimateSpaceNeeded(T) {
+  return detail::digitsEnough<unsigned __int128>();
+}
+
+#endif
+
+/**
+ * int32_t and int64_t to string (by appending) go through here. The
+ * result is APPENDED to a preexisting string passed as the second
+ * parameter. This should be efficient with fbstring because fbstring
+ * incurs no dynamic allocation below 23 bytes and no number has more
+ * than 22 bytes in its textual representation (20 for digits, one for
+ * sign, one for the terminating 0).
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value && std::is_signed<Src>::value &&
+  IsSomeString<Tgt>::value && sizeof(Src) >= 4>::type
+toAppend(Src value, Tgt * result) {
+  char buffer[20];
+  if (value < 0) {
+    result->push_back('-');
+    result->append(buffer, uint64ToBufferUnsafe(-uint64_t(value), buffer));
+  } else {
+    result->append(buffer, uint64ToBufferUnsafe(value, buffer));
+  }
+}
+
+template <class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value && std::is_signed<Src>::value
+  && sizeof(Src) >= 4 && sizeof(Src) < 16,
+  size_t>::type
+estimateSpaceNeeded(Src value) {
+  if (value < 0) {
+    // When "value" is the smallest negative, negating it directly is
+    // undefined behavior; use the two's-complement identity on uint64_t.
+    return 1 + digits10(~static_cast<uint64_t>(value) + 1);
+  }
+
+  return digits10(static_cast<uint64_t>(value));
+}
+
+/**
+ * As above, but for uint32_t and uint64_t.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value && !std::is_signed<Src>::value
+  && IsSomeString<Tgt>::value && sizeof(Src) >= 4>::type
+toAppend(Src value, Tgt * result) {
+  char buffer[20];
+  result->append(buffer, buffer + uint64ToBufferUnsafe(value, buffer));
+}
+
+template <class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value && !std::is_signed<Src>::value
+  && sizeof(Src) >= 4 && sizeof(Src) < 16,
+  size_t>::type
+estimateSpaceNeeded(Src value) {
+  return digits10(value);
+}
+
+/**
+ * All small signed and unsigned integers to string go through 32-bit
+ * types int32_t and uint32_t, respectively.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value
+  && IsSomeString<Tgt>::value && sizeof(Src) < 4>::type
+toAppend(Src value, Tgt * result) {
+  typedef typename
+    std::conditional<std::is_signed<Src>::value, int64_t, uint64_t>::type
+    Intermediate;
+  toAppend<Tgt>(static_cast<Intermediate>(value), result);
+}
+
+template <class Src>
+typename std::enable_if<
+  std::is_integral<Src>::value
+  && sizeof(Src) < 4
+  && !std::is_same<Src, char>::value,
+  size_t>::type
+estimateSpaceNeeded(Src value) {
+  typedef typename
+    std::conditional<std::is_signed<Src>::value, int64_t, uint64_t>::type
+    Intermediate;
+  return estimateSpaceNeeded(static_cast<Intermediate>(value));
+}
+
+#if defined(__clang__) || __GNUC_PREREQ(4, 7)
+// std::underlying_type became available by gcc 4.7.0
+
+/**
+ * Enumerated values get appended as integers.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_enum<Src>::value && IsSomeString<Tgt>::value>::type
+toAppend(Src value, Tgt * result) {
+  toAppend(
+      static_cast<typename std::underlying_type<Src>::type>(value), result);
+}
+
+template <class Src>
+typename std::enable_if<
+  std::is_enum<Src>::value, size_t>::type
+estimateSpaceNeeded(Src value) {
+  return estimateSpaceNeeded(
+      static_cast<typename std::underlying_type<Src>::type>(value));
+}
+
+#else
+
+/**
+ * Enumerated values get appended as integers.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_enum<Src>::value && IsSomeString<Tgt>::value>::type
+toAppend(Src value, Tgt * result) {
+  /* static */ if (Src(-1) < 0) {
+    /* static */ if (sizeof(Src) <= sizeof(int)) {
+      toAppend(static_cast<int>(value), result);
+    } else {
+      toAppend(static_cast<long>(value), result);
+    }
+  } else {
+    /* static */ if (sizeof(Src) <= sizeof(int)) {
+      toAppend(static_cast<unsigned int>(value), result);
+    } else {
+      toAppend(static_cast<unsigned long>(value), result);
+    }
+  }
+}
+
+template <class Src>
+typename std::enable_if<
+  std::is_enum<Src>::value, size_t>::type
+estimateSpaceNeeded(Src value) {
+  /* static */ if (Src(-1) < 0) {
+    /* static */ if (sizeof(Src) <= sizeof(int)) {
+      return estimateSpaceNeeded(static_cast<int>(value));
+    } else {
+      return estimateSpaceNeeded(static_cast<long>(value));
+    }
+  } else {
+    /* static */ if (sizeof(Src) <= sizeof(int)) {
+      return estimateSpaceNeeded(static_cast<unsigned int>(value));
+    } else {
+      return estimateSpaceNeeded(static_cast<unsigned long>(value));
+    }
+  }
+}
+
+#endif // gcc 4.7 onwards
+
+/*******************************************************************************
+ * Conversions from floating-point types to string types.
+ ******************************************************************************/
+
+namespace detail {
+constexpr int kConvMaxDecimalInShortestLow = -6;
+constexpr int kConvMaxDecimalInShortestHigh = 21;
+} // folly::detail
+
+/** Wrapper around DoubleToStringConverter **/
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_floating_point<Src>::value
+  && IsSomeString<Tgt>::value>::type
+toAppend(
+  Src value,
+  Tgt * result,
+  double_conversion::DoubleToStringConverter::DtoaMode mode,
+  unsigned int numDigits) {
+  using namespace double_conversion;
+  DoubleToStringConverter
+    conv(DoubleToStringConverter::NO_FLAGS,
+         "Infinity", "NaN", 'E',
+         detail::kConvMaxDecimalInShortestLow,
+         detail::kConvMaxDecimalInShortestHigh,
+         6,   // max leading padding zeros
+         1);  // max trailing padding zeros
+  char buffer[256];
+  StringBuilder builder(buffer, sizeof(buffer));
+  switch (mode) {
+    case DoubleToStringConverter::SHORTEST:
+      conv.ToShortest(value, &builder);
+      break;
+    case DoubleToStringConverter::FIXED:
+      conv.ToFixed(value, numDigits, &builder);
+      break;
+    default:
+      CHECK(mode == DoubleToStringConverter::PRECISION);
+      conv.ToPrecision(value, numDigits, &builder);
+      break;
+  }
+  const size_t length = builder.position();
+  builder.Finalize();
+  result->append(buffer, length);
+}
+
+/**
+ * As above, but for floating point
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_floating_point<Src>::value
+  && IsSomeString<Tgt>::value>::type
+toAppend(Src value, Tgt * result) {
+  toAppend(
+    value, result, double_conversion::DoubleToStringConverter::SHORTEST, 0);
+}
+
+/**
+ * Upper bound of the length of the output from
+ * DoubleToStringConverter::ToShortest(double, StringBuilder*),
+ * as used in toAppend(double, string*).
+ */
+template <class Src>
+typename std::enable_if<
+  std::is_floating_point<Src>::value, size_t>::type
+estimateSpaceNeeded(Src value) {
+  // kBase10MaximalLength is 17. We add 1 for decimal point,
+  // e.g. 10.0/9 is 17 digits and 18 characters, including the decimal point.
+  constexpr int kMaxMantissaSpace =
+    double_conversion::DoubleToStringConverter::kBase10MaximalLength + 1;
+  // strlen("E-") + digits10(numeric_limits<double>::max_exponent10)
+  constexpr int kMaxExponentSpace = 2 + 3;
+  static const int kMaxPositiveSpace = std::max({
+      // E.g. 1.1111111111111111E-100.
+      kMaxMantissaSpace + kMaxExponentSpace,
+      // E.g. 0.000001.1111111111111111, if kConvMaxDecimalInShortestLow is -6.
+      kMaxMantissaSpace - detail::kConvMaxDecimalInShortestLow,
+      // If kConvMaxDecimalInShortestHigh is 21, then 1e21 is the smallest
+      // number > 1 which ToShortest outputs in exponential notation,
+      // so 21 is the longest non-exponential number > 1.
+      detail::kConvMaxDecimalInShortestHigh
+    });
+  return kMaxPositiveSpace + (value < 0);  // +1 for minus sign, if negative
+}
+
+/**
+ * This can be specialized, together with adding specialization
+ * for estimateSpaceNeed for your type, so that we allocate
+ * as much as you need instead of the default
+ */
+template<class Src>
+struct HasLengthEstimator : std::false_type {};
+
+template <class Src>
+constexpr typename std::enable_if<
+  !std::is_fundamental<Src>::value
+#ifdef FOLLY_HAVE_INT128_T
+  // On OSX 10.10, is_fundamental<__int128> is false :-O
+  && !std::is_same<__int128, Src>::value
+  && !std::is_same<unsigned __int128, Src>::value
+#endif
+  && !IsSomeString<Src>::value
+  && !std::is_convertible<Src, const char*>::value
+  && !std::is_convertible<Src, StringPiece>::value
+  && !std::is_enum<Src>::value
+  && !HasLengthEstimator<Src>::value,
+  size_t>::type
+estimateSpaceNeeded(const Src&) {
+  return sizeof(Src) + 1; // dumbest best effort ever?
+}
+
+namespace detail {
+
+inline size_t estimateSpaceToReserve(size_t sofar) {
+  return sofar;
+}
+
+template <class T, class... Ts>
+size_t estimateSpaceToReserve(size_t sofar, const T& v, const Ts&... vs) {
+  return estimateSpaceToReserve(sofar + estimateSpaceNeeded(v), vs...);
+}
+
+template<class T>
+size_t estimateSpaceToReserve(size_t sofar, const T& v) {
+  return sofar + estimateSpaceNeeded(v);
+}
+
+template<class...Ts>
+void reserveInTarget(const Ts&...vs) {
+  getLastElement(vs...)->reserve(estimateSpaceToReserve(0, vs...));
+}
+
+template<class Delimiter, class...Ts>
+void reserveInTargetDelim(const Delimiter& d, const Ts&...vs) {
+  static_assert(sizeof...(vs) >= 2, "Needs at least 2 args");
+  size_t fordelim = (sizeof...(vs) - 2) * estimateSpaceToReserve(0, d);
+  getLastElement(vs...)->reserve(estimateSpaceToReserve(fordelim, vs...));
+}
+
+/**
+ * Variadic base case: append one element
+ */
+template <class T, class Tgt>
+typename std::enable_if<
+  IsSomeString<typename std::remove_pointer<Tgt>::type>
+  ::value>::type
+toAppendStrImpl(const T& v, Tgt result) {
+  toAppend(v, result);
+}
+
+template <class T, class... Ts>
+typename std::enable_if<sizeof...(Ts) >= 2
+  && IsSomeString<
+  typename std::remove_pointer<
+    typename detail::last_element<Ts...>::type
+  >::type>::value>::type
+toAppendStrImpl(const T& v, const Ts&... vs) {
+  toAppend(v, getLastElement(vs...));
+  toAppendStrImpl(vs...);
+}
+
+template <class Delimiter, class T, class Tgt>
+typename std::enable_if<
+  IsSomeString<typename std::remove_pointer<Tgt>::type>
+  ::value>::type
+toAppendDelimStrImpl(const Delimiter& delim, const T& v, Tgt result) {
+  toAppend(v, result);
+}
+
+template <class Delimiter, class T, class... Ts>
+typename std::enable_if<sizeof...(Ts) >= 2
+  && IsSomeString<
+  typename std::remove_pointer<
+    typename detail::last_element<Ts...>::type
+  >::type>::value>::type
+toAppendDelimStrImpl(const Delimiter& delim, const T& v, const Ts&... vs) {
+  // We are really careful here: calling toAppend with just one element does
+  // not try to estimate the space needed (as we already did that). If we
+  // called toAppend(v, delim, ...) we would do an unnecessary size calculation.
+  toAppend(v, detail::getLastElement(vs...));
+  toAppend(delim, detail::getLastElement(vs...));
+  toAppendDelimStrImpl(delim, vs...);
+}
+} // folly::detail
+
+
+/**
+ * Variadic conversion to string. Appends each element in turn.
+ * If we have two or more things to append, it will not reserve
+ * the space for them and will rely on the string's exponential growth.
+ * If you append just once, consider using toAppendFit, which reserves
+ * exactly the space needed (but, as a result, does not grow exponentially).
+ */
+template <class... Ts>
+typename std::enable_if<sizeof...(Ts) >= 3
+  && IsSomeString<
+  typename std::remove_pointer<
+    typename detail::last_element<Ts...>::type
+  >::type>::value>::type
+toAppend(const Ts&... vs) {
+  ::folly::detail::toAppendStrImpl(vs...);
+}
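+
+// Illustrative usage sketch:
+//
+//   std::string out;
+//   folly::toAppend("x = ", 42, &out);   // out == "x = 42"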
+
+/**
+ * Special version of the call that preallocates exactly as much memory
+ * as needed for the arguments to be stored in the target. This means we are
+ * not doing exponential growth when we append. If you are using it
+ * in a loop you are aiming at your foot with a big perf-destroying
+ * bazooka.
+ * On the other hand if you are appending to a string once, this
+ * will probably save a few calls to malloc.
+ */
+template <class... Ts>
+typename std::enable_if<
+  IsSomeString<
+  typename std::remove_pointer<
+    typename detail::last_element<Ts...>::type
+  >::type>::value>::type
+toAppendFit(const Ts&... vs) {
+  ::folly::detail::reserveInTarget(vs...);
+  toAppend(vs...);
+}
+
+template <class Ts>
+void toAppendFit(const Ts&) {}
+
+/**
+ * Variadic base case: do nothing.
+ */
+template <class Tgt>
+typename std::enable_if<IsSomeString<Tgt>::value>::type
+toAppend(Tgt* result) {
+}
+
+/**
+ * Variadic base case: do nothing.
+ */
+template <class Delimiter, class Tgt>
+typename std::enable_if<IsSomeString<Tgt>::value>::type
+toAppendDelim(const Delimiter& delim, Tgt* result) {
+}
+
+/**
+ * 1 element: same as toAppend.
+ */
+template <class Delimiter, class T, class Tgt>
+typename std::enable_if<IsSomeString<Tgt>::value>::type
+toAppendDelim(const Delimiter& delim, const T& v, Tgt* tgt) {
+  toAppend(v, tgt);
+}
+
+/**
+ * Append to string with a delimiter in between elements. Check out
+ * comments for toAppend for details about memory allocation.
+ */
+template <class Delimiter, class... Ts>
+typename std::enable_if<sizeof...(Ts) >= 3
+  && IsSomeString<
+  typename std::remove_pointer<
+    typename detail::last_element<Ts...>::type
+  >::type>::value>::type
+toAppendDelim(const Delimiter& delim, const Ts&... vs) {
+  detail::toAppendDelimStrImpl(delim, vs...);
+}
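+
+// Illustrative usage sketch:
+//
+//   std::string out;
+//   folly::toAppendDelim(", ", 1, 2, 3, &out);   // out == "1, 2, 3"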
+
+/**
+ * Detail in comment for toAppendFit
+ */
+template <class Delimiter, class... Ts>
+typename std::enable_if<
+  IsSomeString<
+  typename std::remove_pointer<
+    typename detail::last_element<Ts...>::type
+  >::type>::value>::type
+toAppendDelimFit(const Delimiter& delim, const Ts&... vs) {
+  detail::reserveInTargetDelim(delim, vs...);
+  toAppendDelim(delim, vs...);
+}
+
+template <class De, class Ts>
+void toAppendDelimFit(const De&, const Ts&) {}
+
+/**
+ * to<SomeString>(v1, v2, ...) uses toAppend() (see above) as back-end
+ * for all types.
+ */
+template <class Tgt, class... Ts>
+typename std::enable_if<
+  IsSomeString<Tgt>::value && (
+    sizeof...(Ts) != 1 ||
+    !std::is_same<Tgt, typename detail::last_element<Ts...>::type>::value),
+  Tgt>::type
+to(const Ts&... vs) {
+  Tgt result;
+  toAppendFit(vs..., &result);
+  return result;
+}
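+
+// Illustrative usage sketch:
+//
+//   auto s = folly::to<std::string>("pi ~ ", 3.14);   // s == "pi ~ 3.14"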
+
+/**
+ * toDelim<SomeString>(SomeString str) returns itself.
+ */
+template <class Tgt, class Delim, class Src>
+typename std::enable_if<
+  IsSomeString<Tgt>::value && std::is_same<Tgt, Src>::value,
+  Tgt>::type
+toDelim(const Delim& delim, const Src & value) {
+  return value;
+}
+
+/**
+ * toDelim<SomeString>(delim, v1, v2, ...) uses toAppendDelim() as
+ * back-end for all types.
+ */
+template <class Tgt, class Delim, class... Ts>
+typename std::enable_if<
+  IsSomeString<Tgt>::value && (
+    sizeof...(Ts) != 1 ||
+    !std::is_same<Tgt, typename detail::last_element<Ts...>::type>::value),
+  Tgt>::type
+toDelim(const Delim& delim, const Ts&... vs) {
+  Tgt result;
+  toAppendDelimFit(delim, vs..., &result);
+  return result;
+}
+
+/*******************************************************************************
+ * Conversions from string types to integral types.
+ ******************************************************************************/
+
+namespace detail {
+
+/**
+ * Finds the first non-digit in a string. The number of digits
+ * searched depends on the precision of the Tgt integral. Assumes the
+ * string starts with NO whitespace and NO sign.
+ *
+ * The semantics of the routine is:
+ *   for (;; ++b) {
+ *     if (b >= e || !isdigit(*b)) return b;
+ *   }
+ *
+ *  Complete unrolling of this loop yields bottom-line (i.e. entire
+ *  conversion) improvements of 20%.
+ */
+  template <class Tgt>
+  const char* findFirstNonDigit(const char* b, const char* e) {
+    for (; b < e; ++b) {
+      auto const c = static_cast<unsigned>(*b) - '0';
+      if (c >= 10) break;
+    }
+    return b;
+  }
+
+  // Maximum value of number when represented as a string
+  template <class T> struct MaxString {
+    static const char*const value;
+  };
+
+  // clang >= 2.9 requires an "extern" declaration of the MaxString template
+#ifdef __clang__
+#if (__clang_major__ * 100 + __clang_minor__) >= 209
+  // see Conv.cpp:29-50
+  extern template struct MaxString<bool>;
+  extern template struct MaxString<uint8_t>;
+  extern template struct MaxString<uint16_t>;
+  extern template struct MaxString<uint32_t>;
+  extern template struct MaxString<unsigned long>;
+  extern template struct MaxString<unsigned long long>;
+#endif
+#endif
+
+
+/*
+ * Lookup tables that convert from a decimal character value to an integral
+ * binary value, shifted by a decimal "shift" multiplier.
+ * For all character values in the range '0'..'9', the table at those
+ * index locations returns the actual decimal value shifted by the multiplier.
+ * For all other values, the lookup table returns an invalid OOR value.
+ */
+// Out-of-range flag value, larger than the largest value that can fit in
+// four decimal digits (9999), but four of these added up together should
+// still not overflow uint16_t.
+constexpr int32_t OOR = 10000;
+
+FOLLY_ALIGNED(16) constexpr uint16_t shift1[] = {
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 0-9
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  10
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  20
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  30
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0,         //  40
+  1, 2, 3, 4, 5, 6, 7, 8, 9, OOR, OOR,
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  60
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  70
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  80
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  90
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 100
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 110
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 120
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 130
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 140
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 150
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 160
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 170
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 180
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 190
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 200
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 210
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 220
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 230
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 240
+  OOR, OOR, OOR, OOR, OOR, OOR                       // 250
+};
+
+FOLLY_ALIGNED(16) constexpr uint16_t shift10[] = {
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 0-9
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  10
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  20
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  30
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0,         //  40
+  10, 20, 30, 40, 50, 60, 70, 80, 90, OOR, OOR,
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  60
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  70
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  80
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  90
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 100
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 110
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 120
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 130
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 140
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 150
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 160
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 170
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 180
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 190
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 200
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 210
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 220
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 230
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 240
+  OOR, OOR, OOR, OOR, OOR, OOR                       // 250
+};
+
+FOLLY_ALIGNED(16) constexpr uint16_t shift100[] = {
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 0-9
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  10
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  20
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  30
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0,         //  40
+  100, 200, 300, 400, 500, 600, 700, 800, 900, OOR, OOR,
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  60
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  70
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  80
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  90
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 100
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 110
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 120
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 130
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 140
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 150
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 160
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 170
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 180
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 190
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 200
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 210
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 220
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 230
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 240
+  OOR, OOR, OOR, OOR, OOR, OOR                       // 250
+};
+
+FOLLY_ALIGNED(16) constexpr uint16_t shift1000[] = {
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 0-9
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  10
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  20
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  30
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, 0,         //  40
+  1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, OOR, OOR,
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  60
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  70
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  80
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  //  90
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 100
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 110
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 120
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 130
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 140
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 150
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 160
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 170
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 180
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 190
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 200
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 210
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 220
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 230
+  OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR, OOR,  // 240
+  OOR, OOR, OOR, OOR, OOR, OOR                       // 250
+};
+
+/**
+ * Converts a string, represented as a pair of pointers to char, to an
+ * unsigned integral. Assumes NO whitespace before or after, and also that
+ * the string is composed entirely of digits. Tgt must be unsigned, and no
+ * sign is allowed in the string (even if it's '+'). The string may be
+ * empty, in which case digits_to throws.
+ */
+  template <class Tgt>
+  Tgt digits_to(const char * b, const char * e) {
+
+    static_assert(!std::is_signed<Tgt>::value, "Unsigned type expected");
+    assert(b <= e);
+
+    const size_t size = e - b;
+
+    /* Although the string is entirely made of digits, we still need to
+     * check for overflow.
+     */
+    if (size >= std::numeric_limits<Tgt>::digits10 + 1) {
+      // Leading zeros? If so, recurse to keep things simple
+      if (b < e && *b == '0') {
+        for (++b;; ++b) {
+          if (b == e) return 0; // just zeros, e.g. "0000"
+          if (*b != '0') return digits_to<Tgt>(b, e);
+        }
+      }
+      FOLLY_RANGE_CHECK_BEGIN_END(
+        size == std::numeric_limits<Tgt>::digits10 + 1 &&
+        strncmp(b, detail::MaxString<Tgt>::value, size) <= 0,
+        "Numeric overflow upon conversion", b, e);
+    }
+
+    // Here we know that the number won't overflow when
+    // converted. Proceed without checks.
+
+    Tgt result = 0;
+
+    for (; e - b >= 4; b += 4) {
+      result *= 10000;
+      const int32_t r0 = shift1000[static_cast<size_t>(b[0])];
+      const int32_t r1 = shift100[static_cast<size_t>(b[1])];
+      const int32_t r2 = shift10[static_cast<size_t>(b[2])];
+      const int32_t r3 = shift1[static_cast<size_t>(b[3])];
+      const auto sum = r0 + r1 + r2 + r3;
+      assert(sum < OOR && "Assumption: string only has digits");
+      result += sum;
+    }
+
+    switch (e - b) {
+      case 3: {
+        const int32_t r0 = shift100[static_cast<size_t>(b[0])];
+        const int32_t r1 = shift10[static_cast<size_t>(b[1])];
+        const int32_t r2 = shift1[static_cast<size_t>(b[2])];
+        const auto sum = r0 + r1 + r2;
+        assert(sum < OOR && "Assumption: string only has digits");
+        return result * 1000 + sum;
+      }
+      case 2: {
+        const int32_t r0 = shift10[static_cast<size_t>(b[0])];
+        const int32_t r1 = shift1[static_cast<size_t>(b[1])];
+        const auto sum = r0 + r1;
+        assert(sum < OOR && "Assumption: string only has digits");
+        return result * 100 + sum;
+      }
+      case 1: {
+        const int32_t sum = shift1[static_cast<size_t>(b[0])];
+        assert(sum < OOR && "Assumption: string only has digits");
+        return result * 10 + sum;
+      }
+    }
+
+    assert(b == e);
+    FOLLY_RANGE_CHECK_BEGIN_END(size > 0,
+                                "Found no digits to convert in input", b, e);
+    return result;
+  }
+
+
+  bool str_to_bool(StringPiece * src);
+
+}                                 // namespace detail
+
+/**
+ * Converts a string, represented as a pair of pointers to char, to an
+ * unsigned integral. Assumes NO whitespace before or after.
+ */
+template <class Tgt>
+typename std::enable_if<
+  std::is_integral<Tgt>::value && !std::is_signed<Tgt>::value
+  && !std::is_same<typename std::remove_cv<Tgt>::type, bool>::value,
+  Tgt>::type
+to(const char * b, const char * e) {
+  return detail::digits_to<Tgt>(b, e);
+}
+
+/**
+ * Converts a string, represented as a pair of pointers to char, to a
+ * signed integral. Assumes NO whitespace before or after. Allows an
+ * optional leading sign.
+ */
+template <class Tgt>
+typename std::enable_if<
+  std::is_integral<Tgt>::value && std::is_signed<Tgt>::value,
+  Tgt>::type
+to(const char * b, const char * e) {
+  FOLLY_RANGE_CHECK(b < e, "Empty input string in conversion to integral",
+                    to<std::string>("b: ", intptr_t(b), " e: ", intptr_t(e)));
+  if (!isdigit(*b)) {
+    if (*b == '-') {
+      Tgt result = -to<typename std::make_unsigned<Tgt>::type>(b + 1, e);
+      FOLLY_RANGE_CHECK_BEGIN_END(result <= 0, "Negative overflow.", b, e);
+      return result;
+    }
+    FOLLY_RANGE_CHECK_BEGIN_END(*b == '+', "Invalid lead character", b, e);
+    ++b;
+  }
+  Tgt result = to<typename std::make_unsigned<Tgt>::type>(b, e);
+  FOLLY_RANGE_CHECK_BEGIN_END(result >= 0, "Overflow", b, e);
+  return result;
+}
+
+/**
+ * Parsing strings to integrals. These routines differ from
+ * to<integral>(string) in that they take a POINTER TO a StringPiece
+ * and alter that StringPiece to reflect progress information.
+ */
+
+/**
+ * StringPiece to integrals, with progress information. Alters the
+ * StringPiece parameter to munch the already-parsed characters.
+ */
+template <class Tgt>
+typename std::enable_if<
+  std::is_integral<Tgt>::value
+  && !std::is_same<typename std::remove_cv<Tgt>::type, bool>::value,
+  Tgt>::type
+to(StringPiece * src) {
+
+  auto b = src->data(), past = src->data() + src->size();
+  for (;; ++b) {
+    FOLLY_RANGE_CHECK_STRINGPIECE(b < past,
+                                  "No digits found in input string", *src);
+    if (!isspace(*b)) break;
+  }
+
+  auto m = b;
+
+  // First digit is customized because we test for sign
+  bool negative = false;
+  /* static */ if (std::is_signed<Tgt>::value) {
+    if (!isdigit(*m)) {
+      if (*m == '-') {
+        negative = true;
+      } else {
+        FOLLY_RANGE_CHECK_STRINGPIECE(*m == '+', "Invalid leading character in "
+                                      "conversion to integral", *src);
+      }
+      ++b;
+      ++m;
+    }
+  }
+  FOLLY_RANGE_CHECK_STRINGPIECE(m < past, "No digits found in input string",
+                                *src);
+  FOLLY_RANGE_CHECK_STRINGPIECE(isdigit(*m), "Non-digit character found", *src);
+  m = detail::findFirstNonDigit<Tgt>(m + 1, past);
+
+  Tgt result;
+  /* static */ if (!std::is_signed<Tgt>::value) {
+    result = detail::digits_to<typename std::make_unsigned<Tgt>::type>(b, m);
+  } else {
+    auto t = detail::digits_to<typename std::make_unsigned<Tgt>::type>(b, m);
+    if (negative) {
+      result = -t;
+      FOLLY_RANGE_CHECK_STRINGPIECE(is_non_positive(result),
+                                    "Negative overflow", *src);
+    } else {
+      result = t;
+      FOLLY_RANGE_CHECK_STRINGPIECE(is_non_negative(result), "Overflow", *src);
+    }
+  }
+  src->advance(m - src->data());
+  return result;
+}
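+
+// Illustrative progress-tracking sketch (the StringPiece is advanced past
+// the consumed characters):
+//
+//   folly::StringPiece sp("  123abc");
+//   int n = folly::to<int>(&sp);   // n == 123; sp now refers to "abc"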
+
+/**
+ * StringPiece to bool, with progress information. Alters the
+ * StringPiece parameter to munch the already-parsed characters.
+ */
+template <class Tgt>
+typename std::enable_if<
+  std::is_same<typename std::remove_cv<Tgt>::type, bool>::value,
+  Tgt>::type
+to(StringPiece * src) {
+  return detail::str_to_bool(src);
+}
+
+namespace detail {
+
+/**
+ * Enforce that the suffix following a number is made up only of whitespace.
+ */
+inline void enforceWhitespace(const char* b, const char* e) {
+  for (; b != e; ++b) {
+    FOLLY_RANGE_CHECK_BEGIN_END(isspace(*b),
+                                to<std::string>("Non-whitespace: ", *b),
+                                b, e);
+  }
+}
+
+}  // namespace detail
+
+/**
+ * String or StringPiece to integrals. Accepts leading and trailing
+ * whitespace, but no non-space trailing characters.
+ */
+template <class Tgt>
+typename std::enable_if<
+  std::is_integral<Tgt>::value,
+  Tgt>::type
+to(StringPiece src) {
+  Tgt result = to<Tgt>(&src);
+  detail::enforceWhitespace(src.data(), src.data() + src.size());
+  return result;
+}
+
+/*******************************************************************************
+ * Conversions from string types to floating-point types.
+ ******************************************************************************/
+
+/**
+ * StringPiece to double, with progress information. Alters the
+ * StringPiece parameter to munch the already-parsed characters.
+ */
+template <class Tgt>
+inline typename std::enable_if<
+  std::is_floating_point<Tgt>::value,
+  Tgt>::type
+to(StringPiece *const src) {
+  using namespace double_conversion;
+  static StringToDoubleConverter
+    conv(StringToDoubleConverter::ALLOW_TRAILING_JUNK
+         | StringToDoubleConverter::ALLOW_LEADING_SPACES,
+         0.0,
+         // return this for junk input string
+         std::numeric_limits<double>::quiet_NaN(),
+         nullptr, nullptr);
+
+  FOLLY_RANGE_CHECK_STRINGPIECE(!src->empty(),
+                                "No digits found in input string", *src);
+
+  int length;
+  auto result = conv.StringToDouble(src->data(),
+                                    static_cast<int>(src->size()),
+                                    &length); // processed char count
+
+  if (!std::isnan(result)) {
+    src->advance(length);
+    return result;
+  }
+
+  for (;; src->advance(1)) {
+    if (src->empty()) {
+      throw std::range_error("Unable to convert an empty string"
+                             " to a floating point value.");
+    }
+    if (!isspace(src->front())) {
+      break;
+    }
+  }
+
+  // Was that "inf[inity]"?
+  if (src->size() >= 3 && toupper((*src)[0]) == 'I'
+        && toupper((*src)[1]) == 'N' && toupper((*src)[2]) == 'F') {
+    if (src->size() >= 8 &&
+        toupper((*src)[3]) == 'I' &&
+        toupper((*src)[4]) == 'N' &&
+        toupper((*src)[5]) == 'I' &&
+        toupper((*src)[6]) == 'T' &&
+        toupper((*src)[7]) == 'Y') {
+      src->advance(8);
+    } else {
+      src->advance(3);
+    }
+    return std::numeric_limits<Tgt>::infinity();
+  }
+
+  // Was that "-inf[inity]"?
+  if (src->size() >= 4 && toupper((*src)[0]) == '-'
+      && toupper((*src)[1]) == 'I' && toupper((*src)[2]) == 'N'
+      && toupper((*src)[3]) == 'F') {
+    if (src->size() >= 9 &&
+        toupper((*src)[4]) == 'I' &&
+        toupper((*src)[5]) == 'N' &&
+        toupper((*src)[6]) == 'I' &&
+        toupper((*src)[7]) == 'T' &&
+        toupper((*src)[8]) == 'Y') {
+      src->advance(9);
+    } else {
+      src->advance(4);
+    }
+    return -std::numeric_limits<Tgt>::infinity();
+  }
+
+  // "nan"?
+  if (src->size() >= 3 && toupper((*src)[0]) == 'N'
+        && toupper((*src)[1]) == 'A' && toupper((*src)[2]) == 'N') {
+    src->advance(3);
+    return std::numeric_limits<Tgt>::quiet_NaN();
+  }
+
+  // "-nan"?
+  if (src->size() >= 4 &&
+      toupper((*src)[0]) == '-' &&
+      toupper((*src)[1]) == 'N' &&
+      toupper((*src)[2]) == 'A' &&
+      toupper((*src)[3]) == 'N') {
+    src->advance(4);
+    return -std::numeric_limits<Tgt>::quiet_NaN();
+  }
+
+  // All bets are off
+  throw std::range_error("Unable to convert \"" + src->toString()
+                         + "\" to a floating point value.");
+}
+
+/**
+ * Any string, const char*, or StringPiece to double.
+ */
+template <class Tgt>
+typename std::enable_if<
+  std::is_floating_point<Tgt>::value,
+  Tgt>::type
+to(StringPiece src) {
+  Tgt result = to<double>(&src);
+  detail::enforceWhitespace(src.data(), src.data() + src.size());
+  return result;
+}
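+
+// Illustrative usage sketch:
+//
+//   double d = folly::to<double>(" 2.5 ");      // 2.5; whitespace tolerated
+//   double i = folly::to<double>("-infinity");  // -inf, per the parser above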
+
+/*******************************************************************************
+ * Integral to floating point and back
+ ******************************************************************************/
+
+/**
+ * Checked conversion from integral to floating point and back. The
+ * result must be convertible back to the source type without loss of
+ * precision. This seems Draconian but sometimes is what's needed, and
+ * complements existing routines nicely. For various rounding
+ * routines, see <cmath>.
+ */
+template <class Tgt, class Src>
+typename std::enable_if<
+  (std::is_integral<Src>::value && std::is_floating_point<Tgt>::value)
+  ||
+  (std::is_floating_point<Src>::value && std::is_integral<Tgt>::value),
+  Tgt>::type
+to(const Src & value) {
+  Tgt result = value;
+  auto witness = static_cast<Src>(result);
+  if (value != witness) {
+    throw std::range_error(
+      to<std::string>("to<>: loss of precision when converting ", value,
+#ifdef FOLLY_HAS_RTTI
+                      " to type ", typeid(Tgt).name()
+#else
+                      " to other type"
+#endif
+                      ).c_str());
+  }
+  return result;
+}
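+
+// Illustrative behavior sketch:
+//
+//   folly::to<int>(3.0);   // == 3
+//   folly::to<int>(3.5);   // throws std::range_error: loss of precision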
+
+/*******************************************************************************
+ * Enum to anything and back
+ ******************************************************************************/
+
+#if defined(__clang__) || __GNUC_PREREQ(4, 7)
+// std::underlying_type became available by gcc 4.7.0
+
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_enum<Src>::value && !std::is_same<Src, Tgt>::value, Tgt>::type
+to(const Src & value) {
+  return to<Tgt>(static_cast<typename std::underlying_type<Src>::type>(value));
+}
+
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_enum<Tgt>::value && !std::is_same<Src, Tgt>::value, Tgt>::type
+to(const Src & value) {
+  return static_cast<Tgt>(to<typename std::underlying_type<Tgt>::type>(value));
+}
+
+#else
+
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_enum<Src>::value && !std::is_same<Src, Tgt>::value, Tgt>::type
+to(const Src & value) {
+  /* static */ if (Src(-1) < 0) {
+    /* static */ if (sizeof(Src) <= sizeof(int)) {
+      return to<Tgt>(static_cast<int>(value));
+    } else {
+      return to<Tgt>(static_cast<long>(value));
+    }
+  } else {
+    /* static */ if (sizeof(Src) <= sizeof(int)) {
+      return to<Tgt>(static_cast<unsigned int>(value));
+    } else {
+      return to<Tgt>(static_cast<unsigned long>(value));
+    }
+  }
+}
+
+template <class Tgt, class Src>
+typename std::enable_if<
+  std::is_enum<Tgt>::value && !std::is_same<Src, Tgt>::value, Tgt>::type
+to(const Src & value) {
+  /* static */ if (Tgt(-1) < 0) {
+    /* static */ if (sizeof(Tgt) <= sizeof(int)) {
+      return static_cast<Tgt>(to<int>(value));
+    } else {
+      return static_cast<Tgt>(to<long>(value));
+    }
+  } else {
+    /* static */ if (sizeof(Tgt) <= sizeof(int)) {
+      return static_cast<Tgt>(to<unsigned int>(value));
+    } else {
+      return static_cast<Tgt>(to<unsigned long>(value));
+    }
+  }
+}
+
+#endif // gcc 4.7 onwards
+
+} // namespace folly
+
+// FOLLY_CONV_INTERNAL is defined by Conv.cpp.  Keep the FOLLY_RANGE_CHECK
+// macro for use in Conv.cpp, but #undef it everywhere else we are included,
+// to avoid defining this global macro name in other files that include Conv.h.
+#ifndef FOLLY_CONV_INTERNAL
+#undef FOLLY_RANGE_CHECK
+#undef FOLLY_RANGE_CHECK_BEGIN_END
+#undef FOLLY_RANGE_CHECK_STRINGPIECE
+#undef FOLLY_RANGE_CHECK_STRINGIZE
+#undef FOLLY_RANGE_CHECK_STRINGIZE2
+#endif
+
+#endif /* FOLLY_BASE_CONV_H_ */
diff --git a/faux-folly/folly/Demangle.cpp b/faux-folly/folly/Demangle.cpp
new file mode 100644
index 0000000..ee58726
--- /dev/null
+++ b/faux-folly/folly/Demangle.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/Demangle.h>
+
+#include <algorithm>
+#include <string.h>
+
+#include <folly/Malloc.h>
+
+#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
+# include <cxxabi.h>
+
+// From libiberty
+//
+// TODO(tudorb): Detect this with autoconf for the open-source version.
+//
+// __attribute__((__weak__)) doesn't work, because cplus_demangle_v3_callback
+// is exported by an object file in libiberty.a, and the ELF spec says
+// "The link editor does not extract archive members to resolve undefined weak
+// symbols" (but, interestingly enough, will resolve undefined weak symbols
+// with definitions from archive members that were extracted in order to
+// resolve an undefined global (strong) symbol)
+
+# ifndef DMGL_NO_OPTS
+#  define FOLLY_DEFINED_DMGL 1
+#  define DMGL_NO_OPTS    0          /* For readability... */
+#  define DMGL_PARAMS     (1 << 0)   /* Include function args */
+#  define DMGL_ANSI       (1 << 1)   /* Include const, volatile, etc */
+#  define DMGL_JAVA       (1 << 2)   /* Demangle as Java rather than C++. */
+#  define DMGL_VERBOSE    (1 << 3)   /* Include implementation details.  */
+#  define DMGL_TYPES      (1 << 4)   /* Also try to demangle type encodings.  */
+#  define DMGL_RET_POSTFIX (1 << 5)  /* Print function return types (when
+                                        present) after function signature */
+# endif
+
+extern "C" int cplus_demangle_v3_callback(
+    const char* mangled,
+    int options,  // We use DMGL_PARAMS | DMGL_TYPES, aka 0x11
+    void (*callback)(const char*, size_t, void*),
+    void* arg);
+
+#endif
+
+namespace {
+
+// glibc doesn't have strlcpy
+size_t my_strlcpy(char* dest, const char* src, size_t size) {
+  size_t len = strlen(src);
+  if (size != 0) {
+    size_t n = std::min(len, size - 1);  // always null terminate!
+    memcpy(dest, src, n);
+    dest[n] = '\0';
+  }
+  return len;
+}
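+// For example, my_strlcpy(buf, "hello", 4) writes "hel\0" into buf and
+// returns 5; as with strlcpy, a return value >= size signals truncation.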
+
+}  // namespace
+
+namespace folly {
+
+#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
+
+fbstring demangle(const char* name) {
+  int status;
+  size_t len = 0;
+  // malloc() memory for the demangled type name
+  char* demangled = abi::__cxa_demangle(name, nullptr, &len, &status);
+  if (status != 0) {
+    return name;
+  }
+  // len is the length of the buffer (including NUL terminator and maybe
+  // other junk)
+  return fbstring(demangled, strlen(demangled), len, AcquireMallocatedString());
+}
+
+namespace {
+
+struct DemangleBuf {
+  char* dest;
+  size_t remaining;
+  size_t total;
+};
+
+void demangleCallback(const char* str, size_t size, void* p) {
+  DemangleBuf* buf = static_cast<DemangleBuf*>(p);
+  size_t n = std::min(buf->remaining, size);
+  memcpy(buf->dest, str, n);
+  buf->dest += n;
+  buf->remaining -= n;
+  buf->total += size;
+}
+
+}  // namespace
+
+size_t demangle(const char* name, char* out, size_t outSize) {
+  DemangleBuf dbuf;
+  dbuf.dest = out;
+  dbuf.remaining = outSize ? outSize - 1 : 0;   // leave room for null term
+  dbuf.total = 0;
+
+  // Unlike most library functions, this returns 1 on success and 0 on failure
+  int status = cplus_demangle_v3_callback(
+      name,
+      DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES,
+      demangleCallback,
+      &dbuf);
+  if (status == 0) {  // failed, return original
+    return my_strlcpy(out, name, outSize);
+  }
+  if (outSize != 0) {
+    *dbuf.dest = '\0';
+  }
+  return dbuf.total;
+}
+
+#else
+
+fbstring demangle(const char* name) {
+  return name;
+}
+
+size_t demangle(const char* name, char* out, size_t outSize) {
+  return my_strlcpy(out, name, outSize);
+}
+
+#endif
+
+} // folly
diff --git a/faux-folly/folly/Demangle.h b/faux-folly/folly/Demangle.h
new file mode 100644
index 0000000..1422e81
--- /dev/null
+++ b/faux-folly/folly/Demangle.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <folly/FBString.h>
+
+namespace folly {
+
+/**
+ * Return the demangled (prettified) version of a C++ type.
+ *
+ * This function tries to produce a human-readable type, but the type name will
+ * be returned unchanged in case of error or if demangling isn't supported on
+ * your system.
+ *
+ * Use for debugging -- do not rely on demangle() returning anything useful.
+ *
+ * This function may allocate memory (and therefore throw std::bad_alloc).
+ */
+fbstring demangle(const char* name);
+inline fbstring demangle(const std::type_info& type) {
+  return demangle(type.name());
+}
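+
+// For example (an illustrative sketch):
+//
+//   int x = 0;
+//   folly::fbstring pretty = folly::demangle(typeid(x)); // "int" on success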
+
+/**
+ * Return the demangled (prettified) version of a C++ type in a user-provided
+ * buffer.
+ *
+ * The semantics are the same as for snprintf or strlcpy: bufSize is the size
+ * of the buffer, the string is always null-terminated, and the return value is
+ * the number of characters (not including the null terminator) that would have
+ * been written if the buffer were big enough. (So a return value >= bufSize
+ * indicates that the output was truncated.)
+ *
+ * This function does not allocate memory and is async-signal-safe.
+ *
+ * Note that the underlying function for the fbstring-returning demangle is
+ * somewhat standard (abi::__cxa_demangle, which uses malloc), whereas the
+ * underlying function for this version is less so (cplus_demangle_v3_callback
+ * from libiberty); it is therefore possible for the fbstring version to work
+ * while this version returns the original, mangled name.
+ */
+size_t demangle(const char* name, char* buf, size_t bufSize);
+inline size_t demangle(const std::type_info& type, char* buf, size_t bufSize) {
+  return demangle(type.name(), buf, bufSize);
+}
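+
+// For example (an illustrative sketch of the snprintf-style contract):
+//
+//   char buf[256];
+//   size_t needed = folly::demangle(typeid(int), buf, sizeof(buf));
+//   if (needed >= sizeof(buf)) { /* output was truncated */ }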
+
+}
diff --git a/faux-folly/folly/DynamicConverter.h b/faux-folly/folly/DynamicConverter.h
new file mode 100644
index 0000000..4c8b910
--- /dev/null
+++ b/faux-folly/folly/DynamicConverter.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// @author Nicholas Ormrod <njormrod@fb.com>
+
+#ifndef DYNAMIC_CONVERTER_H
+#define DYNAMIC_CONVERTER_H
+
+#include <folly/dynamic.h>
+namespace folly {
+  template <typename T> T convertTo(const dynamic&);
+  template <typename T> dynamic toDynamic(const T&);
+}
+
+/**
+ * convertTo returns a well-typed representation of the input dynamic.
+ *
+ * Example:
+ *
+ *   dynamic d = { { 1, 2, 3 }, { 4, 5 } }; // a vector of vector of int
+ *   auto vvi = convertTo<fbvector<fbvector<int>>>(d);
+ *
+ * See docs/DynamicConverter.md for supported types and customization
+ */
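+
+/**
+ * toDynamic is the inverse conversion. A minimal sketch:
+ *
+ *   std::map<std::string, int> m{{"a", 1}, {"b", 2}};
+ *   dynamic d = toDynamic(m); // the object {"a": 1, "b": 2}
+ *
+ * (std::map is handled by the map DynamicConstructor specialization below.)
+ */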
+
+
+#include <type_traits>
+#include <iterator>
+#include <boost/iterator/iterator_adaptor.hpp>
+#include <boost/mpl/has_xxx.hpp>
+#include <folly/Likely.h>
+
+namespace folly {
+
+///////////////////////////////////////////////////////////////////////////////
+// traits
+
+namespace dynamicconverter_detail {
+
+BOOST_MPL_HAS_XXX_TRAIT_DEF(value_type);
+BOOST_MPL_HAS_XXX_TRAIT_DEF(iterator);
+BOOST_MPL_HAS_XXX_TRAIT_DEF(mapped_type);
+
+template <typename T> struct iterator_class_is_container {
+  typedef std::reverse_iterator<typename T::iterator> some_iterator;
+  enum { value = has_value_type<T>::value &&
+              std::is_constructible<T, some_iterator, some_iterator>::value };
+};
+
+template <typename T>
+using class_is_container = typename
+  std::conditional<
+    has_iterator<T>::value,
+    iterator_class_is_container<T>,
+    std::false_type
+  >::type;
+
+template <typename T> struct class_is_range {
+  enum { value = has_value_type<T>::value &&
+                 has_iterator<T>::value };
+};
+
+
+template <typename T> struct is_container
+  : std::conditional<
+      std::is_class<T>::value,
+      class_is_container<T>,
+      std::false_type
+    >::type {};
+
+template <typename T> struct is_range
+  : std::conditional<
+      std::is_class<T>::value,
+      class_is_range<T>,
+      std::false_type
+    >::type {};
+
+template <typename T> struct is_map
+  : std::integral_constant<
+      bool,
+      is_range<T>::value && has_mapped_type<T>::value
+    > {};
+
+} // namespace dynamicconverter_detail
+
+///////////////////////////////////////////////////////////////////////////////
+// custom iterators
+
+/**
+ * We have iterators that dereference to dynamics, but need iterators
+ * that dereference to typename T.
+ *
+ * Implementation details:
+ *   1. We cache the value of the dereference operator. This is necessary
+ *      because boost::iterator_adaptor requires *it to return a
+ *      reference.
+ *   2. For const reasons, we cannot call operator= to refresh the
+ *      cache: we must call the destructor then placement new.
+ */
+
+namespace dynamicconverter_detail {
+
+template<typename T>
+struct Dereferencer {
+  static inline void
+  derefToCache(T* mem, const dynamic::const_item_iterator& it) {
+    throw TypeError("array", dynamic::Type::OBJECT);
+  }
+
+  static inline void derefToCache(T* mem, const dynamic::const_iterator& it) {
+    new (mem) T(convertTo<T>(*it));
+  }
+};
+
+template<typename F, typename S>
+struct Dereferencer<std::pair<F, S>> {
+  static inline void
+  derefToCache(std::pair<F, S>* mem, const dynamic::const_item_iterator& it) {
+    new (mem) std::pair<F, S>(
+        convertTo<F>(it->first), convertTo<S>(it->second)
+    );
+  }
+
+  // Intentional duplication of the code in Dereferencer
+  template <typename T>
+  static inline void derefToCache(T* mem, const dynamic::const_iterator& it) {
+    new (mem) T(convertTo<T>(*it));
+  }
+};
+
+template <typename T, typename It>
+class Transformer : public boost::iterator_adaptor<
+                             Transformer<T, It>,
+                             It,
+                             typename T::value_type
+                           > {
+  friend class boost::iterator_core_access;
+
+  typedef typename T::value_type ttype;
+
+  mutable ttype cache_;
+  mutable bool valid_;
+
+  void increment() {
+    ++this->base_reference();
+    valid_ = false;
+  }
+
+  ttype& dereference() const {
+    if (LIKELY(!valid_)) {
+      cache_.~ttype();
+      Dereferencer<ttype>::derefToCache(&cache_, this->base_reference());
+      valid_ = true;
+    }
+    return cache_;
+  }
+
+public:
+  explicit Transformer(const It& it)
+    : Transformer::iterator_adaptor_(it), valid_(false) {}
+};
+
+// conversion factory
+template <typename T, typename It>
+inline std::move_iterator<Transformer<T, It>>
+conversionIterator(const It& it) {
+  return std::make_move_iterator(Transformer<T, It>(it));
+}
+
+} // namespace dynamicconverter_detail
+
+///////////////////////////////////////////////////////////////////////////////
+// DynamicConverter specializations
+
+/**
+ * Each specialization of DynamicConverter has the function
+ *     'static T convert(const dynamic&);'
+ */
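+
+// Callers can opt in their own types. A minimal sketch (Widget is a
+// hypothetical user type constructible from an int):
+//
+//   namespace folly {
+//   template <> struct DynamicConverter<Widget> {
+//     static Widget convert(const dynamic& d) {
+//       return Widget(convertTo<int>(d.at("id")));
+//     }
+//   };
+//   }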
+
+// default - intentionally unimplemented
+template <typename T, typename Enable = void> struct DynamicConverter;
+
+// boolean
+template <>
+struct DynamicConverter<bool> {
+  static bool convert(const dynamic& d) {
+    return d.asBool();
+  }
+};
+
+// integrals
+template <typename T>
+struct DynamicConverter<T,
+    typename std::enable_if<std::is_integral<T>::value &&
+                            !std::is_same<T, bool>::value>::type> {
+  static T convert(const dynamic& d) {
+    return folly::to<T>(d.asInt());
+  }
+};
+
+// floating point
+template <typename T>
+struct DynamicConverter<T,
+    typename std::enable_if<std::is_floating_point<T>::value>::type> {
+  static T convert(const dynamic& d) {
+    return folly::to<T>(d.asDouble());
+  }
+};
+
+// fbstring
+template <>
+struct DynamicConverter<folly::fbstring> {
+  static folly::fbstring convert(const dynamic& d) {
+    return d.asString();
+  }
+};
+
+// std::string
+template <>
+struct DynamicConverter<std::string> {
+  static std::string convert(const dynamic& d) {
+    return d.asString().toStdString();
+  }
+};
+
+// std::pair
+template <typename F, typename S>
+struct DynamicConverter<std::pair<F,S>> {
+  static std::pair<F, S> convert(const dynamic& d) {
+    if (d.isArray() && d.size() == 2) {
+      return std::make_pair(convertTo<F>(d[0]), convertTo<S>(d[1]));
+    } else if (d.isObject() && d.size() == 1) {
+      auto it = d.items().begin();
+      return std::make_pair(convertTo<F>(it->first), convertTo<S>(it->second));
+    } else {
+      throw TypeError("array (size 2) or object (size 1)", d.type());
+    }
+  }
+};
+
+// containers
+template <typename C>
+struct DynamicConverter<C,
+    typename std::enable_if<
+      dynamicconverter_detail::is_container<C>::value>::type> {
+  static C convert(const dynamic& d) {
+    if (d.isArray()) {
+      return C(dynamicconverter_detail::conversionIterator<C>(d.begin()),
+               dynamicconverter_detail::conversionIterator<C>(d.end()));
+    } else if (d.isObject()) {
+      return C(dynamicconverter_detail::conversionIterator<C>
+                 (d.items().begin()),
+               dynamicconverter_detail::conversionIterator<C>
+                 (d.items().end()));
+    } else {
+      throw TypeError("object or array", d.type());
+    }
+  }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// DynamicConstructor specializations
+
+/**
+ * Each specialization of DynamicConstructor has the function
+ *     'static dynamic construct(const C&);'
+ */
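+
+// As with DynamicConverter, callers can specialize for their own types.
+// A minimal sketch (Widget, with a hypothetical int member 'id'):
+//
+//   namespace folly {
+//   template <> struct DynamicConstructor<Widget, void> {
+//     static dynamic construct(const Widget& w) {
+//       return dynamic::object("id", w.id);
+//     }
+//   };
+//   }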
+
+// default
+template <typename C, typename Enable = void>
+struct DynamicConstructor {
+  static dynamic construct(const C& x) {
+    return dynamic(x);
+  }
+};
+
+// maps
+template<typename C>
+struct DynamicConstructor<C,
+    typename std::enable_if<
+      dynamicconverter_detail::is_map<C>::value>::type> {
+  static dynamic construct(const C& x) {
+    dynamic d = dynamic::object;
+    for (auto& pair : x) {
+      d.insert(toDynamic(pair.first), toDynamic(pair.second));
+    }
+    return d;
+  }
+};
+
+// other ranges
+template<typename C>
+struct DynamicConstructor<C,
+    typename std::enable_if<
+      !dynamicconverter_detail::is_map<C>::value &&
+      !std::is_constructible<StringPiece, const C&>::value &&
+      dynamicconverter_detail::is_range<C>::value>::type> {
+  static dynamic construct(const C& x) {
+    dynamic d = {};
+    for (auto& item : x) {
+      d.push_back(toDynamic(item));
+    }
+    return d;
+  }
+};
+
+// pair
+template<typename A, typename B>
+struct DynamicConstructor<std::pair<A, B>, void> {
+  static dynamic construct(const std::pair<A, B>& x) {
+    dynamic d = {};
+    d.push_back(toDynamic(x.first));
+    d.push_back(toDynamic(x.second));
+    return d;
+  }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// implementation
+
+template <typename T>
+T convertTo(const dynamic& d) {
+  return DynamicConverter<typename std::remove_cv<T>::type>::convert(d);
+}
+
+template<typename T>
+dynamic toDynamic(const T& x) {
+  return DynamicConstructor<typename std::remove_cv<T>::type>::construct(x);
+}
+
+} // namespace folly
+
+#endif // DYNAMIC_CONVERTER_H
diff --git a/faux-folly/folly/Exception.h b/faux-folly/folly/Exception.h
new file mode 100644
index 0000000..28188c6
--- /dev/null
+++ b/faux-folly/folly/Exception.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_EXCEPTION_H_
+#define FOLLY_EXCEPTION_H_
+
+#include <errno.h>
+
+#include <cstdio>
+#include <stdexcept>
+#include <system_error>
+
+#include <folly/Conv.h>
+#include <folly/FBString.h>
+#include <folly/Likely.h>
+#include <folly/Portability.h>
+
+namespace folly {
+
+// Various helpers to throw appropriate std::system_error exceptions from C
+// library errors (returned in errno, as positive return values (many POSIX
+// functions), or as negative return values (Linux syscalls))
+//
+// The *Explicit functions take an explicit value for errno.
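+//
+// For example (an illustrative sketch):
+//
+//   ssize_t n = ::read(fd, buf, sizeof(buf));
+//   // Throws std::system_error built from errno if n == -1; the message
+//   // pieces are concatenated via to<fbstring>().
+//   folly::checkUnixError(n, "read of fd ", fd, " failed");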
+
+// Helper to throw std::system_error
+FOLLY_NORETURN void throwSystemErrorExplicit(int err, const char*);
+inline void throwSystemErrorExplicit(int err, const char* msg) {
+  throw std::system_error(err, std::system_category(), msg);
+}
+
+template <class... Args>
+FOLLY_NORETURN void throwSystemErrorExplicit(int, Args&&... args);
+template <class... Args>
+void throwSystemErrorExplicit(int err, Args&&... args) {
+  throwSystemErrorExplicit(
+      err, to<fbstring>(std::forward<Args>(args)...).c_str());
+}
+
+// Helper to throw std::system_error from errno and components of a string
+template <class... Args>
+FOLLY_NORETURN void throwSystemError(Args&&... args);
+template <class... Args>
+void throwSystemError(Args&&... args) {
+  throwSystemErrorExplicit(errno, std::forward<Args>(args)...);
+}
+
+// Check a Posix return code (0 on success, error number on error), throw
+// on error.
+template <class... Args>
+void checkPosixError(int err, Args&&... args) {
+  if (UNLIKELY(err != 0)) {
+    throwSystemErrorExplicit(err, std::forward<Args>(args)...);
+  }
+}
+
+// Check a Linux kernel-style return code (>= 0 on success, negative error
+// number on error), throw on error.
+template <class... Args>
+void checkKernelError(ssize_t ret, Args&&... args) {
+  if (UNLIKELY(ret < 0)) {
+    throwSystemErrorExplicit(-ret, std::forward<Args>(args)...);
+  }
+}
+
+// Check a traditional Unix return code (-1 and sets errno on error), throw
+// on error.
+template <class... Args>
+void checkUnixError(ssize_t ret, Args&&... args) {
+  if (UNLIKELY(ret == -1)) {
+    throwSystemError(std::forward<Args>(args)...);
+  }
+}
+
+template <class... Args>
+void checkUnixErrorExplicit(ssize_t ret, int savedErrno, Args&&... args) {
+  if (UNLIKELY(ret == -1)) {
+    throwSystemErrorExplicit(savedErrno, std::forward<Args>(args)...);
+  }
+}
+
+// Check the return code from a fopen-style function (returns a non-nullptr
+// FILE* on success, nullptr on error, sets errno).  Works with fopen, fdopen,
+// freopen, tmpfile, etc.
+template <class... Args>
+void checkFopenError(FILE* fp, Args&&... args) {
+  if (UNLIKELY(!fp)) {
+    throwSystemError(std::forward<Args>(args)...);
+  }
+}
+
+template <class... Args>
+void checkFopenErrorExplicit(FILE* fp, int savedErrno, Args&&... args) {
+  if (UNLIKELY(!fp)) {
+    throwSystemErrorExplicit(savedErrno, std::forward<Args>(args)...);
+  }
+}
+
+template <typename E, typename V, typename... Args>
+void throwOnFail(V&& value, Args&&... args) {
+  if (!value) {
+    throw E(std::forward<Args>(args)...);
+  }
+}
+
+/**
+ * If cond is not true, raise an exception of type E.  E must have a ctor that
+ * works with const char* (a description of the failure).
+ */
+#define CHECK_THROW(cond, E) \
+  ::folly::throwOnFail<E>((cond), "Check failed: " #cond)
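+
+// For example (an illustrative sketch):
+//
+//   CHECK_THROW(input != nullptr, std::invalid_argument);
+//
+// throws std::invalid_argument("Check failed: input != nullptr") when
+// input is null.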
+
+}  // namespace folly
+
+#endif /* FOLLY_EXCEPTION_H_ */
diff --git a/faux-folly/folly/ExceptionWrapper.h b/faux-folly/folly/ExceptionWrapper.h
new file mode 100644
index 0000000..98ff9cb
--- /dev/null
+++ b/faux-folly/folly/ExceptionWrapper.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_EXCEPTIONWRAPPER_H
+#define FOLLY_EXCEPTIONWRAPPER_H
+
+#include <cassert>
+#include <exception>
+#include <memory>
+#include <folly/String.h>
+#include <folly/detail/ExceptionWrapper.h>
+
+namespace folly {
+
+/*
+ * Throwing exceptions can be a convenient way to handle errors. Storing
+ * exceptions in an exception_ptr makes it easy to handle exceptions in a
+ * different thread or at a later time. exception_ptr can also be used in a very
+ * generic result/exception wrapper.
+ *
+ * However, there are some issues with throwing exceptions and
+ * std::exception_ptr. These issues revolve around throw being expensive,
+ * particularly in a multithreaded environment (see
+ * ExceptionWrapperBenchmark.cpp).
+ *
+ * Imagine we have a library that has an API which returns a result/exception
+ * wrapper. Let's consider some approaches for implementing this wrapper.
+ * First, we could store a std::exception. This approach loses the derived
+ * exception type, which can make exception handling more difficult for users
+ * that prefer rethrowing the exception. We could use a folly::dynamic for every
+ * possible type of exception. This is not very flexible - adding new types of
+ * exceptions requires a change to the result/exception wrapper. We could use an
+ * exception_ptr. However, constructing an exception_ptr as well as accessing
+ * the error requires a call to throw. That means that there will be two calls
+ * to throw in order to process the exception. For performance sensitive
+ * applications, this may be unacceptable.
+ *
+ * exception_wrapper is designed to handle exception management for both
+ * convenience and high performance use cases. make_exception_wrapper is
+ * templated on derived type, allowing us to rethrow the exception properly for
+ * users that prefer convenience. These explicitly named exception types can
+ * therefore be handled without any performance penalty.  exception_wrapper is
+ * also flexible enough to accept any type. If a caught exception is not of an
+ * explicitly named type, then std::exception_ptr is used to preserve the
+ * exception state. For performance sensitive applications, the accessor methods
+ * can test or extract a pointer to a specific exception type with very little
+ * overhead.
+ *
+ * Example usage:
+ *
+ * exception_wrapper globalExceptionWrapper;
+ *
+ * // Thread1
+ * void doSomethingCrazy() {
+ *   int rc = doSomethingCrazyWithLameReturnCodes();
+ *   if (rc == NAILED_IT) {
+ *     globalExceptionWrapper = exception_wrapper();
+ *   } else if (rc == FACE_PLANT) {
+ *     globalExceptionWrapper = make_exception_wrapper<FacePlantException>();
+ *   } else if (rc == FAIL_WHALE) {
+ *     globalExceptionWrapper = make_exception_wrapper<FailWhaleException>();
+ *   }
+ * }
+ *
+ * // Thread2: Exceptions are ok!
+ * void processResult() {
+ *   try {
+ *     globalExceptionWrapper.throwException();
+ *   } catch (const FacePlantException& e) {
+ *     LOG(ERROR) << "FACEPLANT!";
+ *   } catch (const FailWhaleException& e) {
+ *     LOG(ERROR) << "FAILWHALE!";
+ *   }
+ * }
+ *
+ * // Thread2: Exceptions are bad!
+ * void processResult() {
+ *   auto ep = globalExceptionWrapper.get();
+ *   if (!ep.with_exception<FacePlantException>([&](
+ *     FacePlantException& faceplant) {
+ *       LOG(ERROR) << "FACEPLANT";
+ *     })) {
+ *     ep.with_exception<FailWhaleException>([&](
+ *       FailWhaleException& failwhale) {
+ *         LOG(ERROR) << "FAILWHALE!";
+ *       });
+ *   }
+ * }
+ *
+ */
+class exception_wrapper {
+ protected:
+  template <typename Ex>
+  struct optimize;
+
+ public:
+  exception_wrapper() = default;
+
+  // Implicitly construct an exception_wrapper from a qualifying exception.
+  // See the optimize struct for details.
+  template <typename Ex, typename =
+    typename std::enable_if<optimize<typename std::decay<Ex>::type>::value>
+    ::type>
+  /* implicit */ exception_wrapper(Ex&& exn) {
+    typedef typename std::decay<Ex>::type DEx;
+    item_ = std::make_shared<DEx>(std::forward<Ex>(exn));
+    throwfn_ = folly::detail::Thrower<DEx>::doThrow;
+  }
+
+  // The following two constructors are meant to emulate the behavior of
+  // try_and_catch in performance sensitive code as well as to be flexible
+  // enough to wrap exceptions of unknown type. There is an overload that
+  // takes an exception reference so that the wrapper can extract and store
+  // the exception's type and what() when possible.
+  //
+  // The canonical use case is to construct an all-catching exception wrapper
+  // with minimal overhead like so:
+  //
+  //   try {
+  //     // some throwing code
+  //   } catch (const std::exception& e) {
+  //     // won't lose e's type and what()
+  //     exception_wrapper ew{std::current_exception(), e};
+  //   } catch (...) {
+  //     // everything else
+  //     exception_wrapper ew{std::current_exception()};
+  //   }
+  //
+  // try_and_catch is cleaner and preferable. Use it unless you're sure you need
+  // something like this instead.
+  template <typename Ex>
+  explicit exception_wrapper(std::exception_ptr eptr, Ex& exn) {
+    assign_eptr(eptr, exn);
+  }
+
+  explicit exception_wrapper(std::exception_ptr eptr) {
+    assign_eptr(eptr);
+  }
+
+  void throwException() const {
+    if (throwfn_) {
+      throwfn_(item_.get());
+    } else if (eptr_) {
+      std::rethrow_exception(eptr_);
+    }
+  }
+
+  explicit operator bool() const {
+    return item_ || eptr_;
+  }
+
+  // This implementation is similar to std::exception_ptr's implementation
+  // where two exception_wrappers are equal when the addresses in their
+  // underlying reference fields point to the same exception object.  The reference
+  // field remains the same when the exception_wrapper is copied or when
+  // the exception_wrapper is "rethrown".
+  bool operator==(const exception_wrapper& a) const {
+    if (item_) {
+      return a.item_ && item_.get() == a.item_.get();
+    } else {
+      return eptr_ == a.eptr_;
+    }
+  }
+
+  bool operator!=(const exception_wrapper& a) const {
+    return !(*this == a);
+  }
+
+  // This will return a non-nullptr only if the exception is held as a
+  // copy.  It is the only interface that distinguishes between an
+  // exception held this way and one held by exception_ptr.  You probably
+  // shouldn't use it at all.
+  std::exception* getCopied() { return item_.get(); }
+  const std::exception* getCopied() const { return item_.get(); }
+
+  fbstring what() const {
+    if (item_) {
+      return exceptionStr(*item_);
+    } else if (eptr_) {
+      return estr_;
+    } else {
+      return fbstring();
+    }
+  }
+
+  fbstring class_name() const {
+    if (item_) {
+      auto& i = *item_;
+      return demangle(typeid(i));
+    } else if (eptr_) {
+      return ename_;
+    } else {
+      return fbstring();
+    }
+  }
+
+  template <class Ex>
+  bool is_compatible_with() const {
+    if (item_) {
+      return dynamic_cast<const Ex*>(item_.get());
+    } else if (eptr_) {
+      try {
+        std::rethrow_exception(eptr_);
+      } catch (std::exception& e) {
+        return dynamic_cast<const Ex*>(&e);
+      } catch (...) {
+        // fall through
+      }
+    }
+    return false;
+  }
+
+  // If this exception wrapper wraps an exception of type Ex, with_exception
+  // will call f with the wrapped exception as an argument and return true, and
+  // will otherwise return false.
+  template <class Ex, class F>
+  typename std::enable_if<
+    std::is_base_of<std::exception, typename std::decay<Ex>::type>::value,
+    bool>::type
+  with_exception(F f) {
+    return with_exception1<typename std::decay<Ex>::type>(f, this);
+  }
+
+  // Const overload
+  template <class Ex, class F>
+  typename std::enable_if<
+    std::is_base_of<std::exception, typename std::decay<Ex>::type>::value,
+    bool>::type
+  with_exception(F f) const {
+    return with_exception1<const typename std::decay<Ex>::type>(f, this);
+  }
+
+  // Overload for non-exceptions. Always rethrows.
+  template <class Ex, class F>
+  typename std::enable_if<
+    !std::is_base_of<std::exception, typename std::decay<Ex>::type>::value,
+    bool>::type
+  with_exception(F f) const {
+    try {
+      throwException();
+    } catch (typename std::decay<Ex>::type& e) {
+      f(e);
+      return true;
+    } catch (...) {
+      // fall through
+    }
+    return false;
+  }
+
+  std::exception_ptr getExceptionPtr() const {
+    if (eptr_) {
+      return eptr_;
+    }
+
+    try {
+      throwException();
+    } catch (...) {
+      return std::current_exception();
+    }
+    return std::exception_ptr();
+  }
+
+protected:
+  template <typename Ex>
+  struct optimize {
+    static const bool value =
+      std::is_base_of<std::exception, Ex>::value &&
+      std::is_copy_assignable<Ex>::value &&
+      !std::is_abstract<Ex>::value;
+  };
+
+  template <typename Ex>
+  void assign_eptr(std::exception_ptr eptr, Ex& e) {
+    this->eptr_ = eptr;
+    this->estr_ = exceptionStr(e).toStdString();
+    this->ename_ = demangle(typeid(e)).toStdString();
+  }
+
+  void assign_eptr(std::exception_ptr eptr) {
+    this->eptr_ = eptr;
+  }
+
+  // Optimized case: if we know what type the exception is, we can
+  // store a copy of the concrete type, and a helper function so we
+  // can rethrow it.
+  std::shared_ptr<std::exception> item_;
+  void (*throwfn_)(std::exception*){nullptr};
+  // Fallback case: store the library wrapper, which is less efficient
+  // but gets the job done.  Also store the what() string and the name of the
+  // exception type, so we can at least get those back out without
+  // having to rethrow.
+  std::exception_ptr eptr_;
+  std::string estr_;
+  std::string ename_;
+
+  template <class T, class... Args>
+  friend exception_wrapper make_exception_wrapper(Args&&... args);
+
+private:
+  // What makes this useful is that T can be exception_wrapper* or
+  // const exception_wrapper*, and the compiler will use the
+  // instantiation which works with F.
+  template <class Ex, class F, class T>
+  static bool with_exception1(F f, T* that) {
+    if (that->item_) {
+      if (auto ex = dynamic_cast<Ex*>(that->item_.get())) {
+        f(*ex);
+        return true;
+      }
+    } else if (that->eptr_) {
+      try {
+        std::rethrow_exception(that->eptr_);
+      } catch (std::exception& e) {
+        if (auto ex = dynamic_cast<Ex*>(&e)) {
+          f(*ex);
+          return true;
+        }
+      } catch (...) {
+        // fall through
+      }
+    }
+    return false;
+  }
+};
+
+template <class T, class... Args>
+exception_wrapper make_exception_wrapper(Args&&... args) {
+  exception_wrapper ew;
+  ew.item_ = std::make_shared<T>(std::forward<Args>(args)...);
+  ew.throwfn_ = folly::detail::Thrower<T>::doThrow;
+  return ew;
+}
+
+// For consistency with exceptionStr() functions in String.h
+inline fbstring exceptionStr(const exception_wrapper& ew) {
+  return ew.what();
+}
+
+/*
+ * try_and_catch is a simple replacement for try {} catch(){} that allows you to
+ * specify which derived exceptions you would like to catch and store in an
+ * exception_wrapper.
+ *
+ * Because we cannot build an equivalent of std::current_exception(), we need
+ * to catch every derived exception that we are interested in catching.
+ *
+ * Exceptions should be listed in the reverse order that you would write your
+ * catch statements (that is, std::exception& should be first).
+ *
+ * NOTE: Although implemented as a derived class (for syntactic delight), don't
+ * be confused - you should not pass around try_and_catch objects!
+ *
+ * Example Usage:
+ *
+ * // This catches my runtime_error and if I call throwException() on ew, it
+ * // will throw a runtime_error
+ * auto ew = folly::try_and_catch<std::exception, std::runtime_error>([=]() {
+ *   if (badThingHappens()) {
+ *     throw std::runtime_error("ZOMG!");
+ *   }
+ * });
+ *
+ * // This will catch the exception and if I call throwException() on ew, it
+ * // will throw a std::exception
+ * auto ew = folly::try_and_catch<std::exception, std::runtime_error>([=]() {
+ *   if (badThingHappens()) {
+ *     throw std::exception();
+ *   }
+ * });
+ *
+ * // This will not catch the exception and it will be thrown.
+ * auto ew = folly::try_and_catch<std::runtime_error>([=]() {
+ *   if (badThingHappens()) {
+ *     throw std::exception();
+ *   }
+ * });
+ */
+
+template <typename... Exceptions>
+class try_and_catch;
+
+template <typename LastException, typename... Exceptions>
+class try_and_catch<LastException, Exceptions...> :
+    public try_and_catch<Exceptions...> {
+ public:
+  template <typename F>
+  explicit try_and_catch(F&& fn) : Base() {
+    call_fn(fn);
+  }
+
+ protected:
+  typedef try_and_catch<Exceptions...> Base;
+
+  try_and_catch() : Base() {}
+
+  template <typename Ex>
+  typename std::enable_if<!exception_wrapper::optimize<Ex>::value>::type
+  assign_exception(Ex& e, std::exception_ptr eptr) {
+    exception_wrapper::assign_eptr(eptr, e);
+  }
+
+  template <typename Ex>
+  typename std::enable_if<exception_wrapper::optimize<Ex>::value>::type
+  assign_exception(Ex& e, std::exception_ptr /*eptr*/) {
+    this->item_ = std::make_shared<Ex>(e);
+    this->throwfn_ = folly::detail::Thrower<Ex>::doThrow;
+  }
+
+  template <typename F>
+  void call_fn(F&& fn) {
+    try {
+      Base::call_fn(std::move(fn));
+    } catch (LastException& e) {
+      if (typeid(e) == typeid(LastException&)) {
+        assign_exception(e, std::current_exception());
+      } else {
+        exception_wrapper::assign_eptr(std::current_exception(), e);
+      }
+    }
+  }
+};
+
+template<>
+class try_and_catch<> : public exception_wrapper {
+ public:
+  try_and_catch() = default;
+
+ protected:
+  template <typename F>
+  void call_fn(F&& fn) {
+    fn();
+  }
+};
+}
+#endif
diff --git a/faux-folly/folly/Executor.h b/faux-folly/folly/Executor.h
new file mode 100644
index 0000000..168b659
--- /dev/null
+++ b/faux-folly/folly/Executor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <climits>
+#include <functional>
+#include <stdexcept>
+
+namespace folly {
+
+typedef std::function<void()> Func;
+
+/// An Executor accepts units of work with add(), which should be
+/// threadsafe.
+class Executor {
+ public:
+  virtual ~Executor() = default;
+
+  /// Enqueue a function to be executed by this executor. This and all
+  /// variants must be threadsafe.
+  virtual void add(Func) = 0;
+
+  /// Enqueue a function with a given priority, where 0 is the medium priority.
+  /// Enforcement is up to the implementation.
+  virtual void addWithPriority(Func, int8_t /*priority*/) {
+    throw std::runtime_error(
+        "addWithPriority() is not implemented for this Executor");
+  }
+
+  virtual uint8_t getNumPriorities() const {
+    return 1;
+  }
+
+  static const int8_t LO_PRI  = SCHAR_MIN;
+  static const int8_t MID_PRI = 0;
+  static const int8_t HI_PRI  = SCHAR_MAX;
+
+  /// A convenience function for shared_ptr to legacy functors.
+  ///
+  /// Sometimes you have a functor that is move-only, and therefore can't be
+  /// converted to a std::function (e.g. std::packaged_task). In that case,
+  /// wrap it in a shared_ptr (or maybe folly::MoveWrapper) and use this.
+  template <class P>
+  void addPtr(P fn) {
+    this->add([fn]() mutable { (*fn)(); });
+  }
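+
+  // For example (an illustrative sketch; doWork is hypothetical):
+  //
+  //   auto task = std::make_shared<std::packaged_task<void()>>(
+  //       [] { doWork(); });
+  //   executor->addPtr(task);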
+};
+
+} // folly
diff --git a/faux-folly/folly/ExecutorWithCancellation.h b/faux-folly/folly/ExecutorWithCancellation.h
new file mode 100644
index 0000000..5ffd5ca
--- /dev/null
+++ b/faux-folly/folly/ExecutorWithCancellation.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Nest Labs, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <folly/Executor.h>
+#include <folly/Cancellation.h>
+
+namespace folly {
+
+/**
+ * An Executor that has a single Cancellation associated with it
+ *
+ * This is a convenience interface for situations where you want an
+ * event loop Executor that has a single Cancellation object
+ * associated with it. When shutting down the Executor, the
+ * Cancellation object can be used to cancel all pending transactions.
+ */
+class ExecutorWithCancellation : virtual public Executor {
+public:
+    /**
+     * Replaces the Cancellation object, cancelling the current one.
+     *
+     * This should be called ONCE, before ever calling
+     * getCancellation(). Before setting the new Cancellation, the
+     * existing cancellation will be cancelled (thus cancelling all
+     * in-flight transactions that referred to that Cancellation
+     * object).
+     *
+     * @param cx the new Cancellation object for the Executor.
+     */
+    virtual void setCancellation(Cancellation cx) = 0;
+
+    /**
+     * Access the Cancellation object
+     *
+     * This allows the caller to access or create a copy of the
+     * Cancellation object.
+     *
+     * @return the Cancellation object that is used to cancel all
+     * transactions for this Executor.
+     */
+    virtual Cancellation& getCancellation() = 0;
+};
+
+} // folly
diff --git a/faux-folly/folly/FBString.h b/faux-folly/folly/FBString.h
new file mode 100644
index 0000000..4afce74
--- /dev/null
+++ b/faux-folly/folly/FBString.h
@@ -0,0 +1,2533 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// @author: Andrei Alexandrescu (aalexandre)
+// String type.
+
+#ifndef FOLLY_BASE_FBSTRING_H_
+#define FOLLY_BASE_FBSTRING_H_
+
+#include <atomic>
+#include <limits>
+#include <type_traits>
+
+// This file appears in two locations: inside fbcode and in the
+// libstdc++ source code (when embedding fbstring as std::string).
+// To aid in this schizophrenic use, _LIBSTDCXX_FBSTRING is defined in
+// libstdc++'s c++config.h, to gate use inside fbcode v. libstdc++.
+#ifdef _LIBSTDCXX_FBSTRING
+
+#pragma GCC system_header
+
+// Handle the cases where the fbcode version (folly/Malloc.h) is included
+// either before or after this inclusion.
+#ifdef FOLLY_MALLOC_H_
+#undef FOLLY_MALLOC_H_
+#include "basic_fbstring_malloc.h" // nolint
+#else
+#include "basic_fbstring_malloc.h" // nolint
+#undef FOLLY_MALLOC_H_
+#endif
+
+#else // !_LIBSTDCXX_FBSTRING
+
+#include <folly/Portability.h>
+
+// libc++ doesn't provide this header, nor does msvc
+#ifdef FOLLY_HAVE_BITS_CXXCONFIG_H
+#include <bits/c++config.h>
+#endif
+
+#include <string>
+#include <cstring>
+#include <cassert>
+#include <algorithm>
+
+#include <folly/Traits.h>
+#include <folly/Malloc.h>
+#include <folly/Hash.h>
+#include <folly/ScopeGuard.h>
+
+#if FOLLY_HAVE_DEPRECATED_ASSOC
+#ifdef _GLIBCXX_SYMVER
+#include <ext/hash_set>
+#include <ext/hash_map>
+#endif
+#endif
+
+#endif
+
+// We defined these here rather than including Likely.h to avoid
+// redefinition errors when fbstring is imported into libstdc++.
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define FBSTRING_LIKELY(x)   (__builtin_expect((x), 1))
+#define FBSTRING_UNLIKELY(x) (__builtin_expect((x), 0))
+#else
+#define FBSTRING_LIKELY(x)   (x)
+#define FBSTRING_UNLIKELY(x) (x)
+#endif
+
+// Ignore shadowing warnings within this file, so includers can use -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+
+// FBString cannot use throw when replacing std::string, though it may still
+// use std::__throw_*
+// nolint
+#define throw FOLLY_FBSTRING_MAY_NOT_USE_THROW
+
+#ifdef _LIBSTDCXX_FBSTRING
+namespace std _GLIBCXX_VISIBILITY(default) {
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#else
+namespace folly {
+#endif
+
+// Different versions of gcc/clang support different versions of
+// the address sanitizer attribute.  Unfortunately, this attribute
+// has issues when inlining is used, so disable that as well.
+#if defined(__clang__)
+# if __has_feature(address_sanitizer)
+#  if __has_attribute(__no_address_safety_analysis__)
+#   define FBSTRING_DISABLE_ADDRESS_SANITIZER \
+      __attribute__((__no_address_safety_analysis__, __noinline__))
+#  elif __has_attribute(__no_sanitize_address__)
+#   define FBSTRING_DISABLE_ADDRESS_SANITIZER \
+      __attribute__((__no_sanitize_address__, __noinline__))
+#  endif
+# endif
+#elif defined (__GNUC__) && \
+      (__GNUC__ == 4) && \
+      (__GNUC_MINOR__ >= 8) && \
+      __SANITIZE_ADDRESS__
+# define FBSTRING_DISABLE_ADDRESS_SANITIZER \
+    __attribute__((__no_address_safety_analysis__, __noinline__))
+#endif
+#ifndef FBSTRING_DISABLE_ADDRESS_SANITIZER
+# define FBSTRING_DISABLE_ADDRESS_SANITIZER
+#endif
+
+namespace fbstring_detail {
+
+template <class InIt, class OutIt>
+inline
+OutIt copy_n(InIt b,
+             typename std::iterator_traits<InIt>::difference_type n,
+             OutIt d) {
+  for (; n != 0; --n, ++b, ++d) {
+    *d = *b;
+  }
+  return d;
+}
+
+template <class Pod, class T>
+inline void pod_fill(Pod* b, Pod* e, T c) {
+  assert(b && e && b <= e);
+  /*static*/ if (sizeof(T) == 1) {
+    memset(b, c, e - b);
+  } else {
+    auto const ee = b + ((e - b) & ~7u);
+    for (; b != ee; b += 8) {
+      b[0] = c;
+      b[1] = c;
+      b[2] = c;
+      b[3] = c;
+      b[4] = c;
+      b[5] = c;
+      b[6] = c;
+      b[7] = c;
+    }
+    // Leftovers
+    for (; b != e; ++b) {
+      *b = c;
+    }
+  }
+}
+
+/*
+ * Lightly structured memcpy, simplifies copying PODs and introduces
+ * some asserts. Unfortunately using this function may cause
+ * measurable overhead (presumably because it adjusts from a begin/end
+ * convention to a pointer/size convention, so it does some extra
+ * arithmetic even though the caller might have done the inverse
+ * adaptation outside).
+ */
+template <class Pod>
+inline void pod_copy(const Pod* b, const Pod* e, Pod* d) {
+  assert(e >= b);
+  assert(d >= e || d + (e - b) <= b);
+  memcpy(d, b, (e - b) * sizeof(Pod));
+}
+
+/*
+ * Lightly structured memmove, simplifies copying PODs and introduces
+ * some asserts
+ */
+template <class Pod>
+inline void pod_move(const Pod* b, const Pod* e, Pod* d) {
+  assert(e >= b);
+  memmove(d, b, (e - b) * sizeof(*b));
+}
+
+} // namespace fbstring_detail
+
+/**
+ * Defines a special acquisition method for constructing fbstring
+ * objects. AcquireMallocatedString means that the user passes a
+ * pointer to a malloc-allocated string that the fbstring object will
+ * take into custody.
+ */
+enum class AcquireMallocatedString {};
+
+/*
+ * fbstring_core_model is a mock-up type that defines all required
+ * signatures of a fbstring core. The fbstring class itself uses such
+ * a core object to implement all of the numerous member functions
+ * required by the standard.
+ *
+ * If you want to define a new core, copy the definition below and
+ * implement the primitives. Then plug the core into basic_fbstring as
+ * a template argument.
+
+template <class Char>
+class fbstring_core_model {
+public:
+  fbstring_core_model();
+  fbstring_core_model(const fbstring_core_model &);
+  ~fbstring_core_model();
+  // Returns a pointer to string's buffer (currently only contiguous
+  // strings are supported). The pointer is guaranteed to be valid
+  // until the next call to a non-const member function.
+  const Char * data() const;
+  // Much like data(), except the string is prepared to support
+  // character-level changes. This call is a signal for
+  // e.g. reference-counted implementation to fork the data. The
+  // pointer is guaranteed to be valid until the next call to a
+  // non-const member function.
+  Char * mutable_data();
+  // Returns a pointer to string's buffer and guarantees that a
+  // readable '\0' lies right after the buffer. The pointer is
+  // guaranteed to be valid until the next call to a non-const member
+  // function.
+  const Char * c_str() const;
+  // Shrinks the string by delta characters. Asserts that delta <=
+  // size().
+  void shrink(size_t delta);
+  // Expands the string by delta characters (i.e. after this call
+  // size() will report the old size() plus delta) but without
+  // initializing the expanded region. Returns a pointer to the memory
+  // to be initialized (the beginning of the expanded portion). The
+  // caller is expected to fill the expanded area appropriately.
+  Char* expand_noinit(size_t delta);
+  // Expands the string by one character and sets the last character
+  // to c.
+  void push_back(Char c);
+  // Returns the string's size.
+  size_t size() const;
+  // Returns the string's capacity, i.e. maximum size that the string
+  // can grow to without reallocation. Note that for reference counted
+  // strings that's technically a lie - even assigning characters
+  // within the existing size would cause a reallocation.
+  size_t capacity() const;
+  // Returns true if the data underlying the string is actually shared
+  // across multiple strings (in a refcounted fashion).
+  bool isShared() const;
+  // Makes sure that at least minCapacity characters are available for
+  // the string without reallocation. For reference-counted strings,
+  // it should fork the data even if minCapacity < size().
+  void reserve(size_t minCapacity);
+private:
+  // Do not implement
+  fbstring_core_model& operator=(const fbstring_core_model &);
+};
+*/
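+
+// A replacement core would then be plugged in roughly like so (a sketch,
+// assuming the storage core is basic_fbstring's last template argument,
+// as it is for the default fbstring_core):
+//
+//   typedef basic_fbstring<char, std::char_traits<char>,
+//                          std::allocator<char>, my_core<char>> my_string;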
+
+/**
+ * gcc-4.7 throws what appears to be some false positive uninitialized
+ * warnings for the members of the MediumLarge struct.  So, mute them here.
+ */
+#if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+
+/**
+ * This is the core of the string. The code should work on 32- and
+ * 64-bit architectures and with any Char size. Porting to big endian
+ * architectures would require some changes.
+ *
+ * The storage is selected as follows (assuming we store one-byte
+ * characters on a 64-bit machine): (a) "small" strings between 0 and
+ * 23 chars are stored in-situ without allocation (the rightmost byte
+ * stores the size); (b) "medium" strings from 24 through 254 chars
+ * are stored in malloc-allocated memory that is copied eagerly; (c)
+ * "large" strings of 255 chars and above are stored in a similar
+ * structure as medium arrays, except that the string is
+ * reference-counted and copied lazily. the reference count is
+ * allocated right before the character array.
+ *
+ * The discriminator between these three strategies sits in the two
+ * most significant bits of the rightmost char of the storage. If
+ * neither is set, then the string is small (and its length sits in
+ * the lower-order bits of that rightmost character). If the MSb is
+ * set, the string is medium width. If the second MSb is set, then the
+ * string is large.
+ */
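+
+// For example (one-byte chars on a 64-bit machine): a 10-char string
+// lives entirely in the 24 in-situ bytes with no heap allocation; a
+// 100-char string owns a malloc'd buffer that is copied eagerly whenever
+// the string is copied; a 1000-char string is refcounted, so copying it
+// only bumps a reference count and the characters are forked lazily on
+// mutation.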
+template <class Char> class fbstring_core {
+public:
+  fbstring_core() noexcept {
+    // Only initialize the tag; this will set the MSBs (i.e. the small
+    // string size) to zero too
+    ml_.capacity_ = maxSmallSize << (8 * (sizeof(size_t) - sizeof(Char)));
+    // or: setSmallSize(0);
+    writeTerminator();
+    assert(category() == Category::isSmall && size() == 0);
+  }
+
+  fbstring_core(const fbstring_core & rhs) {
+    assert(&rhs != this);
+    // Simplest case first: small strings are bitblitted
+    if (rhs.category() == Category::isSmall) {
+      static_assert(offsetof(MediumLarge, data_) == 0,
+          "fbstring layout failure");
+      static_assert(offsetof(MediumLarge, size_) == sizeof(ml_.data_),
+          "fbstring layout failure");
+      static_assert(offsetof(MediumLarge, capacity_) == 2 * sizeof(ml_.data_),
+          "fbstring layout failure");
+      const size_t size = rhs.smallSize();
+      if (size == 0) {
+        ml_.capacity_ = rhs.ml_.capacity_;
+        writeTerminator();
+      } else {
+        // Just write the whole thing, don't look at details. In
+        // particular we need to copy capacity anyway because we want
+        // to set the size (don't forget that the last character,
+        // which stores a short string's length, is shared with the
+        // ml_.capacity field).
+        ml_ = rhs.ml_;
+      }
+      assert(category() == Category::isSmall && this->size() == rhs.size());
+    } else if (rhs.category() == Category::isLarge) {
+      // Large strings are just refcounted
+      ml_ = rhs.ml_;
+      RefCounted::incrementRefs(ml_.data_);
+      assert(category() == Category::isLarge && size() == rhs.size());
+    } else {
+      // Medium strings are copied eagerly. Don't forget to allocate
+      // one extra Char for the null terminator.
+      auto const allocSize =
+           goodMallocSize((1 + rhs.ml_.size_) * sizeof(Char));
+      ml_.data_ = static_cast<Char*>(checkedMalloc(allocSize));
+      fbstring_detail::pod_copy(rhs.ml_.data_,
+                                // 1 for terminator
+                                rhs.ml_.data_ + rhs.ml_.size_ + 1,
+                                ml_.data_);
+      // No need for writeTerminator() here, we copied one extra
+      // element just above.
+      ml_.size_ = rhs.ml_.size_;
+      ml_.capacity_ = (allocSize / sizeof(Char) - 1)
+                      | static_cast<category_type>(Category::isMedium);
+      assert(category() == Category::isMedium);
+    }
+    assert(size() == rhs.size());
+    assert(memcmp(data(), rhs.data(), size() * sizeof(Char)) == 0);
+  }
+
+  fbstring_core(fbstring_core&& goner) noexcept {
+    if (goner.category() == Category::isSmall) {
+      // Just copy, leave the goner in peace
+      new(this) fbstring_core(goner.small_, goner.smallSize());
+    } else {
+      // Take goner's guts
+      ml_ = goner.ml_;
+      // Clean goner's carcass
+      goner.setSmallSize(0);
+    }
+  }
+
+  // NOTE(agallagher): The word-aligned copy path copies bytes which are
+  // outside the range of the string, and makes address sanitizer unhappy,
+  // so just disable it on this function.
+  fbstring_core(const Char *const data, const size_t size)
+      FBSTRING_DISABLE_ADDRESS_SANITIZER {
+#ifndef NDEBUG
+#ifndef _LIBSTDCXX_FBSTRING
+    SCOPE_EXIT {
+      assert(this->size() == size);
+      assert(memcmp(this->data(), data, size * sizeof(Char)) == 0);
+    };
+#endif
+#endif
+
+    // Simplest case first: small strings are bitblitted
+    if (size <= maxSmallSize) {
+      // Layout is: Char* data_, size_t size_, size_t capacity_
+      static_assert(sizeof(*this) == sizeof(Char*) + 2 * sizeof(size_t),
+          "fbstring has unexpected size");
+      static_assert(sizeof(Char*) == sizeof(size_t),
+          "fbstring size assumption violation");
+      // sizeof(size_t) must be a power of 2
+      static_assert((sizeof(size_t) & (sizeof(size_t) - 1)) == 0,
+          "fbstring size assumption violation");
+
+      // If data is aligned, use fast word-wise copying. Otherwise,
+      // use conservative memcpy.
+      if (reinterpret_cast<size_t>(data) & (sizeof(size_t) - 1)) {
+        fbstring_detail::pod_copy(data, data + size, small_);
+      } else {
+        // Copy one word (64 bits) at a time
+        const size_t byteSize = size * sizeof(Char);
+        if (byteSize > 2 * sizeof(size_t)) {
+          // Copy three words
+          ml_.capacity_ = reinterpret_cast<const size_t*>(data)[2];
+          copyTwo:
+          ml_.size_ = reinterpret_cast<const size_t*>(data)[1];
+          copyOne:
+          ml_.data_ = *reinterpret_cast<Char**>(const_cast<Char*>(data));
+        } else if (byteSize > sizeof(size_t)) {
+          // Copy two words
+          goto copyTwo;
+        } else if (size > 0) {
+          // Copy one word
+          goto copyOne;
+        }
+      }
+      setSmallSize(size);
+      return;
+    } else if (size <= maxMediumSize) {
+      // Medium strings are allocated normally. Don't forget to
+      // allocate one extra Char for the terminating null.
+      auto const allocSize = goodMallocSize((1 + size) * sizeof(Char));
+      ml_.data_ = static_cast<Char*>(checkedMalloc(allocSize));
+      fbstring_detail::pod_copy(data, data + size, ml_.data_);
+      ml_.size_ = size;
+      ml_.capacity_ = (allocSize / sizeof(Char) - 1)
+                      | static_cast<category_type>(Category::isMedium);
+    } else {
+      // Large strings are allocated differently
+      size_t effectiveCapacity = size;
+      auto const newRC = RefCounted::create(data, & effectiveCapacity);
+      ml_.data_ = newRC->data_;
+      ml_.size_ = size;
+      ml_.capacity_ = effectiveCapacity
+                      | static_cast<category_type>(Category::isLarge);
+    }
+    writeTerminator();
+  }
+
+  ~fbstring_core() noexcept {
+    auto const c = category();
+    if (c == Category::isSmall) {
+      return;
+    }
+    if (c == Category::isMedium) {
+      free(ml_.data_);
+      return;
+    }
+    RefCounted::decrementRefs(ml_.data_);
+  }
+
+  // Snatches a previously mallocated string. The parameter "size"
+  // is the size of the string, and the parameter "allocatedSize"
+  // is the size of the mallocated block.  The string must be
+  // \0-terminated, so allocatedSize >= size + 1 and data[size] == '\0'.
+  //
+  // So if you want a 2-character string, pass malloc(3) as "data",
+  // pass 2 as "size", and pass 3 as "allocatedSize".
+  fbstring_core(Char * const data,
+                const size_t size,
+                const size_t allocatedSize,
+                AcquireMallocatedString) {
+    if (size > 0) {
+      assert(allocatedSize >= size + 1);
+      assert(data[size] == '\0');
+      // Use the medium string storage
+      ml_.data_ = data;
+      ml_.size_ = size;
+      // Don't forget about null terminator
+      ml_.capacity_ = (allocatedSize - 1)
+                      | static_cast<category_type>(Category::isMedium);
+    } else {
+      // No need for the memory
+      free(data);
+      setSmallSize(0);
+    }
+  }
+
+  // swap below doesn't test whether &rhs == this (and instead
+  // potentially does extra work) on the premise that the rarity of
+  // that situation actually makes the check more expensive than it is
+  // worth.
+  void swap(fbstring_core & rhs) {
+    auto const t = ml_;
+    ml_ = rhs.ml_;
+    rhs.ml_ = t;
+  }
+
+  // In C++11 data() and c_str() are 100% equivalent.
+  const Char * data() const {
+    return c_str();
+  }
+
+  Char * mutable_data() {
+    auto const c = category();
+    if (c == Category::isSmall) {
+      return small_;
+    }
+    assert(c == Category::isMedium || c == Category::isLarge);
+    if (c == Category::isLarge && RefCounted::refs(ml_.data_) > 1) {
+      // Ensure unique.
+      size_t effectiveCapacity = ml_.capacity();
+      auto const newRC = RefCounted::create(& effectiveCapacity);
+      // If this fails, someone placed the wrong capacity in an
+      // fbstring.
+      assert(effectiveCapacity >= ml_.capacity());
+      fbstring_detail::pod_copy(ml_.data_, ml_.data_ + ml_.size_ + 1,
+                                newRC->data_);
+      RefCounted::decrementRefs(ml_.data_);
+      ml_.data_ = newRC->data_;
+      // No need to call writeTerminator(), we have + 1 above.
+    }
+    return ml_.data_;
+  }
+
+  const Char * c_str() const {
+    auto const c = category();
+    if (c == Category::isSmall) {
+      assert(small_[smallSize()] == '\0');
+      return small_;
+    }
+    assert(c == Category::isMedium || c == Category::isLarge);
+    assert(ml_.data_[ml_.size_] == '\0');
+    return ml_.data_;
+  }
+
+  void shrink(const size_t delta) {
+    if (category() == Category::isSmall) {
+      // Check for underflow
+      assert(delta <= smallSize());
+      setSmallSize(smallSize() - delta);
+    } else if (category() == Category::isMedium ||
+               RefCounted::refs(ml_.data_) == 1) {
+      // Medium strings and unique large strings need no special
+      // handling.
+      assert(ml_.size_ >= delta);
+      ml_.size_ -= delta;
+      writeTerminator();
+    } else {
+      assert(ml_.size_ >= delta);
+      // Shared large string; we must make it unique because the
+      // terminator must be written, which may trample the shared
+      // data.
+      if (delta) {
+        fbstring_core(ml_.data_, ml_.size_ - delta).swap(*this);
+      }
+      // No need to write the terminator.
+    }
+  }
+
+  void reserve(size_t minCapacity) {
+    if (category() == Category::isLarge) {
+      // Ensure unique
+      if (RefCounted::refs(ml_.data_) > 1) {
+        // We must make it unique regardless; in-place reallocation is
+        // useless if the string is shared. In order to not surprise
+        // people, reserve the new block at current capacity or
+        // more. That way, a string's capacity never shrinks after a
+        // call to reserve.
+        minCapacity = std::max(minCapacity, ml_.capacity());
+        auto const newRC = RefCounted::create(& minCapacity);
+        fbstring_detail::pod_copy(ml_.data_, ml_.data_ + ml_.size_ + 1,
+                                   newRC->data_);
+        // Done with the old data. No need to call writeTerminator(),
+        // we have + 1 above.
+        RefCounted::decrementRefs(ml_.data_);
+        ml_.data_ = newRC->data_;
+        ml_.capacity_ = minCapacity
+                        | static_cast<category_type>(Category::isLarge);
+        // size remains unchanged
+      } else {
+        // String is not shared, so let's try to realloc (if needed)
+        if (minCapacity > ml_.capacity()) {
+          // Asking for more memory
+          auto const newRC =
+               RefCounted::reallocate(ml_.data_, ml_.size_,
+                                      ml_.capacity(), minCapacity);
+          ml_.data_ = newRC->data_;
+          ml_.capacity_ = minCapacity
+                          | static_cast<category_type>(Category::isLarge);
+          writeTerminator();
+        }
+        assert(capacity() >= minCapacity);
+      }
+    } else if (category() == Category::isMedium) {
+      // String is not shared
+      if (minCapacity <= ml_.capacity()) {
+        return; // nothing to do, there's enough room
+      }
+      if (minCapacity <= maxMediumSize) {
+        // Keep the string at medium size. Don't forget to allocate
+        // one extra Char for the terminating null.
+        size_t capacityBytes = goodMallocSize((1 + minCapacity) * sizeof(Char));
+        ml_.data_ = static_cast<Char *>(
+          smartRealloc(
+            ml_.data_,
+            ml_.size_ * sizeof(Char),
+            (ml_.capacity() + 1) * sizeof(Char),
+            capacityBytes));
+        writeTerminator();
+        ml_.capacity_ = (capacityBytes / sizeof(Char) - 1)
+                        | static_cast<category_type>(Category::isMedium);
+      } else {
+        // Conversion from medium to large string
+        fbstring_core nascent;
+        // Will recurse to another branch of this function
+        nascent.reserve(minCapacity);
+        nascent.ml_.size_ = ml_.size_;
+        fbstring_detail::pod_copy(ml_.data_, ml_.data_ + ml_.size_,
+                                  nascent.ml_.data_);
+        nascent.swap(*this);
+        writeTerminator();
+        assert(capacity() >= minCapacity);
+      }
+    } else {
+      assert(category() == Category::isSmall);
+      if (minCapacity > maxMediumSize) {
+        // large
+        auto const newRC = RefCounted::create(& minCapacity);
+        auto const size = smallSize();
+        fbstring_detail::pod_copy(small_, small_ + size + 1, newRC->data_);
+        // No need for writeTerminator(), we wrote it above with + 1.
+        ml_.data_ = newRC->data_;
+        ml_.size_ = size;
+        ml_.capacity_ = minCapacity
+                        | static_cast<category_type>(Category::isLarge);
+        assert(capacity() >= minCapacity);
+      } else if (minCapacity > maxSmallSize) {
+        // medium
+        // Don't forget to allocate one extra Char for the terminating null
+        auto const allocSizeBytes =
+          goodMallocSize((1 + minCapacity) * sizeof(Char));
+        auto const data = static_cast<Char*>(checkedMalloc(allocSizeBytes));
+        auto const size = smallSize();
+        fbstring_detail::pod_copy(small_, small_ + size + 1, data);
+        // No need for writeTerminator(), we wrote it above with + 1.
+        ml_.data_ = data;
+        ml_.size_ = size;
+        ml_.capacity_ = (allocSizeBytes / sizeof(Char) - 1)
+                        | static_cast<category_type>(Category::isMedium);
+      } else {
+        // small
+        // Nothing to do, everything stays put
+      }
+    }
+    assert(capacity() >= minCapacity);
+  }
+
+  Char * expand_noinit(const size_t delta) {
+    // Strategy is simple: make room, then change size
+    assert(capacity() >= size());
+    size_t sz, newSz;
+    if (category() == Category::isSmall) {
+      sz = smallSize();
+      newSz = sz + delta;
+      if (newSz <= maxSmallSize) {
+        setSmallSize(newSz);
+        return small_ + sz;
+      }
+      reserve(newSz);
+    } else {
+      sz = ml_.size_;
+      newSz = ml_.size_ + delta;
+      if (newSz > capacity()) {
+        reserve(newSz);
+      }
+    }
+    assert(capacity() >= newSz);
+    // Category can't be small - we took care of that above
+    assert(category() == Category::isMedium || category() == Category::isLarge);
+    ml_.size_ = newSz;
+    writeTerminator();
+    assert(size() == newSz);
+    return ml_.data_ + sz;
+  }
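+
+  // Editorial note: expand_noinit() is the growth primitive used by
+  // append(), resize() and several basic_fbstring constructors below.
+  // It makes room and bumps the size but leaves the new characters
+  // uninitialized, so the caller must fill them, e.g.:
+  //
+  //   Char* p = core.expand_noinit(n);   // size grows by n
+  //   std::fill(p, p + n, c);            // caller initializes the tail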
+
+  void push_back(Char c) {
+    assert(capacity() >= size());
+    size_t sz;
+    if (category() == Category::isSmall) {
+      sz = smallSize();
+      if (sz < maxSmallSize) {
+        small_[sz] = c;
+        setSmallSize(sz + 1);
+        return;
+      }
+      reserve(maxSmallSize * 2);
+    } else {
+      sz = ml_.size_;
+      if (sz == capacity()) {  // always true for isShared()
+        reserve(1 + sz * 3 / 2);  // ensures not shared
+      }
+    }
+    assert(!isShared());
+    assert(capacity() >= sz + 1);
+    // Category can't be small - we took care of that above
+    assert(category() == Category::isMedium || category() == Category::isLarge);
+    ml_.size_ = sz + 1;
+    ml_.data_[sz] = c;
+    writeTerminator();
+  }
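+
+  // Editorial note: the "1 + sz * 3 / 2" reservation above grows
+  // capacity geometrically (factor ~1.5), which keeps repeated
+  // push_back amortized O(1). For char on a 64-bit build the capacity
+  // evolves roughly 23 -> 46 -> ~70 -> ~106 -> ... (goodMallocSize may
+  // round each step up a little).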
+
+  size_t size() const {
+    return category() == Category::isSmall ? smallSize() : ml_.size_;
+  }
+
+  size_t capacity() const {
+    switch (category()) {
+      case Category::isSmall:
+        return maxSmallSize;
+      case Category::isLarge:
+        // For large-sized strings, a multi-referenced chunk has no
+        // available capacity. This is because any attempt to append
+        // data would trigger a new allocation.
+        if (RefCounted::refs(ml_.data_) > 1) return ml_.size_;
+      default: {}
+    }
+    return ml_.capacity();
+  }
+
+  bool isShared() const {
+    return category() == Category::isLarge && RefCounted::refs(ml_.data_) > 1;
+  }
+
+  void writeTerminator() {
+    if (category() == Category::isSmall) {
+      const auto s = smallSize();
+      if (s != maxSmallSize) {
+        small_[s] = '\0';
+      }
+    } else {
+      ml_.data_[ml_.size_] = '\0';
+    }
+  }
+
+private:
+  // Disabled
+  fbstring_core & operator=(const fbstring_core & rhs);
+
+  struct MediumLarge {
+    Char * data_;
+    size_t size_;
+    size_t capacity_;
+
+    size_t capacity() const {
+      return capacity_ & capacityExtractMask;
+    }
+  };
+
+  struct RefCounted {
+    std::atomic<size_t> refCount_;
+    Char data_[1];
+
+    static RefCounted * fromData(Char * p) {
+      return static_cast<RefCounted*>(
+        static_cast<void*>(
+          static_cast<unsigned char*>(static_cast<void*>(p))
+          - sizeof(refCount_)));
+    }
+
+    static size_t refs(Char * p) {
+      return fromData(p)->refCount_.load(std::memory_order_acquire);
+    }
+
+    static void incrementRefs(Char * p) {
+      fromData(p)->refCount_.fetch_add(1, std::memory_order_acq_rel);
+    }
+
+    static void decrementRefs(Char * p) {
+      auto const dis = fromData(p);
+      size_t oldcnt = dis->refCount_.fetch_sub(1, std::memory_order_acq_rel);
+      assert(oldcnt > 0);
+      if (oldcnt == 1) {
+        free(dis);
+      }
+    }
+
+    static RefCounted * create(size_t * size) {
+      // Don't forget to allocate one extra Char for the terminating
+      // null. In this case, however, one Char is already part of the
+      // struct.
+      const size_t allocSize = goodMallocSize(
+        sizeof(RefCounted) + *size * sizeof(Char));
+      auto result = static_cast<RefCounted*>(checkedMalloc(allocSize));
+      result->refCount_.store(1, std::memory_order_release);
+      *size = (allocSize - sizeof(RefCounted)) / sizeof(Char);
+      return result;
+    }
+
+    static RefCounted * create(const Char * data, size_t * size) {
+      const size_t effectiveSize = *size;
+      auto result = create(size);
+      fbstring_detail::pod_copy(data, data + effectiveSize, result->data_);
+      return result;
+    }
+
+    static RefCounted * reallocate(Char *const data,
+                                   const size_t currentSize,
+                                   const size_t currentCapacity,
+                                   const size_t newCapacity) {
+      assert(newCapacity > 0 && newCapacity > currentSize);
+      auto const dis = fromData(data);
+      assert(dis->refCount_.load(std::memory_order_acquire) == 1);
+      // Don't forget to allocate one extra Char for the terminating
+      // null. In this case, however, one Char is already part of the
+      // struct.
+      auto result = static_cast<RefCounted*>(
+             smartRealloc(dis,
+                          sizeof(RefCounted) + currentSize * sizeof(Char),
+                          sizeof(RefCounted) + currentCapacity * sizeof(Char),
+                          sizeof(RefCounted) + newCapacity * sizeof(Char)));
+      assert(result->refCount_.load(std::memory_order_acquire) == 1);
+      return result;
+    }
+  };
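+
+  // Editorial sketch of the large-string layout that fromData() above
+  // relies on: the characters are co-allocated right after the atomic
+  // reference count, so the header is recovered from a data pointer
+  // with a single subtraction:
+  //
+  //   [ refCount_ : size_t ][ data_[0] ... data_[size-1] '\0' ]
+  //   ^ start of malloc'ed    ^ ml_.data_ points here
+  //     block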
+
+  union {
+    Char small_[sizeof(MediumLarge) / sizeof(Char)];
+    MediumLarge ml_;
+  };
+
+  enum : size_t {
+    lastChar = sizeof(MediumLarge) - 1,
+    maxSmallSize = lastChar / sizeof(Char),
+    maxMediumSize = 254 / sizeof(Char),            // coincides with the small
+                                                   // bin size in dlmalloc
+    categoryExtractMask = sizeof(size_t) == 4 ? 0xC0000000 : 0xC000000000000000,
+    capacityExtractMask = ~categoryExtractMask,
+  };
+  static_assert(!(sizeof(MediumLarge) % sizeof(Char)),
+                "Corrupt memory layout for fbstring.");
+
+  typedef std::conditional<sizeof(size_t) == 4, uint32_t, uint64_t>::type
+          category_type;
+
+  enum class Category : category_type {
+    isSmall = 0,
+    isMedium = sizeof(size_t) == 4 ? 0x80000000 : 0x8000000000000000,
+    isLarge =  sizeof(size_t) == 4 ? 0x40000000 : 0x4000000000000000,
+  };
+
+  Category category() const {
+    // Assumes little endian
+    return static_cast<Category>(ml_.capacity_ & categoryExtractMask);
+  }
+
+  size_t smallSize() const {
+    assert(category() == Category::isSmall &&
+           static_cast<size_t>(small_[maxSmallSize])
+           <= static_cast<size_t>(maxSmallSize));
+    return static_cast<size_t>(maxSmallSize)
+      - static_cast<size_t>(small_[maxSmallSize]);
+  }
+
+  void setSmallSize(size_t s) {
+    // Warning: this should work with uninitialized strings too,
+    // so don't assume anything about the previous value of
+    // small_[maxSmallSize].
+    assert(s <= maxSmallSize);
+    small_[maxSmallSize] = maxSmallSize - s;
+    writeTerminator();
+  }
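+
+  // Editorial example: the last byte of small_ stores the *remaining*
+  // capacity (maxSmallSize - size) rather than the size itself. For
+  // char on a 64-bit build (maxSmallSize == 23):
+  //
+  //   size == 0   =>  small_[23] == 23
+  //   size == 5   =>  small_[23] == 18
+  //   size == 23  =>  small_[23] == 0, i.e. the size byte doubles as
+  //                   the '\0' terminator of a full small string,
+  //                   which is why writeTerminator() above can skip
+  //                   that case.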
+};
+
+#if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic pop
+#endif
+
+#ifndef _LIBSTDCXX_FBSTRING
+/**
+ * Dummy fbstring core that uses an actual std::string. This doesn't
+ * make any sense - it's just for testing purposes.
+ */
+template <class Char>
+class dummy_fbstring_core {
+public:
+  dummy_fbstring_core() {
+  }
+  dummy_fbstring_core(const dummy_fbstring_core& another)
+      : backend_(another.backend_) {
+  }
+  dummy_fbstring_core(const Char * s, size_t n)
+      : backend_(s, n) {
+  }
+  void swap(dummy_fbstring_core & rhs) {
+    backend_.swap(rhs.backend_);
+  }
+  const Char * data() const {
+    return backend_.data();
+  }
+  Char * mutable_data() {
+    //assert(!backend_.empty());
+    return &*backend_.begin();
+  }
+  void shrink(size_t delta) {
+    assert(delta <= size());
+    backend_.resize(size() - delta);
+  }
+  Char * expand_noinit(size_t delta) {
+    auto const sz = size();
+    backend_.resize(size() + delta);
+    // std::basic_string::data() is const-qualified before C++17, so go
+    // through begin() for a mutable pointer.
+    return &*backend_.begin() + sz;
+  }
+  void push_back(Char c) {
+    backend_.push_back(c);
+  }
+  size_t size() const {
+    return backend_.size();
+  }
+  size_t capacity() const {
+    return backend_.capacity();
+  }
+  bool isShared() const {
+    return false;
+  }
+  void reserve(size_t minCapacity) {
+    backend_.reserve(minCapacity);
+  }
+
+private:
+  std::basic_string<Char> backend_;
+};
+#endif // !_LIBSTDCXX_FBSTRING
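+
+// Editorial usage sketch: the dummy core lets the basic_fbstring layer
+// (declared below) be exercised against std::string-backed storage in
+// tests; test_string here is a hypothetical alias:
+//
+//   typedef basic_fbstring<char, std::char_traits<char>,
+//                          std::allocator<char>,
+//                          dummy_fbstring_core<char> > test_string;
+//   test_string s("hello");   // same interface, std::string storage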
+
+/**
+ * This is the basic_string replacement. For conformity,
+ * basic_fbstring takes the same template parameters, plus the last
+ * one which is the core.
+ */
+#ifdef _LIBSTDCXX_FBSTRING
+template <typename E, class T, class A, class Storage>
+#else
+template <typename E,
+          class T = std::char_traits<E>,
+          class A = std::allocator<E>,
+          class Storage = fbstring_core<E> >
+#endif
+class basic_fbstring {
+
+  static void enforce(
+      bool condition,
+      void (*throw_exc)(const char*),
+      const char* msg) {
+    if (!condition) throw_exc(msg);
+  }
+
+  bool isSane() const {
+    return
+      begin() <= end() &&
+      empty() == (size() == 0) &&
+      empty() == (begin() == end()) &&
+      size() <= max_size() &&
+      capacity() <= max_size() &&
+      size() <= capacity() &&
+      begin()[size()] == '\0';
+  }
+
+  struct Invariant;
+  friend struct Invariant;
+  struct Invariant {
+#ifndef NDEBUG
+    explicit Invariant(const basic_fbstring& s) : s_(s) {
+      assert(s_.isSane());
+    }
+    ~Invariant() {
+      assert(s_.isSane());
+    }
+  private:
+    const basic_fbstring& s_;
+#else
+    explicit Invariant(const basic_fbstring&) {}
+#endif
+    Invariant& operator=(const Invariant&);
+  };
+
+public:
+  // types
+  typedef T traits_type;
+  typedef typename traits_type::char_type value_type;
+  typedef A allocator_type;
+  typedef typename A::size_type size_type;
+  typedef typename A::difference_type difference_type;
+
+  typedef typename A::reference reference;
+  typedef typename A::const_reference const_reference;
+  typedef typename A::pointer pointer;
+  typedef typename A::const_pointer const_pointer;
+
+  typedef E* iterator;
+  typedef const E* const_iterator;
+  typedef std::reverse_iterator<iterator
+#ifdef NO_ITERATOR_TRAITS
+                                , value_type
+#endif
+                                > reverse_iterator;
+  typedef std::reverse_iterator<const_iterator
+#ifdef NO_ITERATOR_TRAITS
+                                , const value_type
+#endif
+                                > const_reverse_iterator;
+
+  static const size_type npos;                     // = size_type(-1)
+
+private:
+  static void procrustes(size_type& n, size_type nmax) {
+    if (n > nmax) n = nmax;
+  }
+
+public:
+  // C++11 21.4.2 construct/copy/destroy
+  explicit basic_fbstring(const A& /*a*/ = A()) noexcept {
+  }
+
+  basic_fbstring(const basic_fbstring& str)
+      : store_(str.store_) {
+  }
+
+  // Move constructor
+  basic_fbstring(basic_fbstring&& goner) noexcept
+      : store_(std::move(goner.store_)) {
+  }
+
+#ifndef _LIBSTDCXX_FBSTRING
+  // This is defined for compatibility with std::string
+  /* implicit */ basic_fbstring(const std::string& str)
+      : store_(str.data(), str.size()) {
+  }
+#endif
+
+  basic_fbstring(const basic_fbstring& str, size_type pos,
+                 size_type n = npos, const A& a = A()) {
+    assign(str, pos, n);
+  }
+
+  /* implicit */ basic_fbstring(const value_type* s, const A& /*a*/ = A())
+      : store_(s, s
+          ? traits_type::length(s)
+          : (std::__throw_logic_error(
+                "basic_fbstring: null pointer initializer not valid"),
+             0)) {
+  }
+
+  basic_fbstring(const value_type* s, size_type n, const A& /*a*/ = A())
+      : store_(s, n) {
+  }
+
+  basic_fbstring(size_type n, value_type c, const A& /*a*/ = A()) {
+    auto const data = store_.expand_noinit(n);
+    fbstring_detail::pod_fill(data, data + n, c);
+    store_.writeTerminator();
+  }
+
+  template <class InIt>
+  basic_fbstring(InIt begin, InIt end,
+                 typename std::enable_if<
+                 !std::is_same<typename std::remove_const<InIt>::type,
+                 value_type*>::value, const A>::type & /*a*/ = A()) {
+    assign(begin, end);
+  }
+
+  // Specialization for const char*, const char*
+  basic_fbstring(const value_type* b, const value_type* e)
+      : store_(b, e - b) {
+  }
+
+  // Nonstandard constructor
+  basic_fbstring(value_type *s, size_type n, size_type c,
+                 AcquireMallocatedString a)
+      : store_(s, n, c, a) {
+  }
+
+  // Construction from initialization list
+  basic_fbstring(std::initializer_list<value_type> il) {
+    assign(il.begin(), il.end());
+  }
+
+  ~basic_fbstring() noexcept {
+  }
+
+  basic_fbstring& operator=(const basic_fbstring& lhs) {
+    if (FBSTRING_UNLIKELY(&lhs == this)) {
+      return *this;
+    }
+    auto const oldSize = size();
+    auto const srcSize = lhs.size();
+    if (capacity() >= srcSize && !store_.isShared()) {
+      // great, just copy the contents
+      if (oldSize < srcSize)
+        store_.expand_noinit(srcSize - oldSize);
+      else
+        store_.shrink(oldSize - srcSize);
+      assert(size() == srcSize);
+      fbstring_detail::pod_copy(lhs.begin(), lhs.end(), begin());
+      store_.writeTerminator();
+    } else {
+      // need to reallocate, so we may as well create a brand new string
+      basic_fbstring(lhs).swap(*this);
+    }
+    return *this;
+  }
+
+  // Move assignment
+  basic_fbstring& operator=(basic_fbstring&& goner) noexcept {
+    if (FBSTRING_UNLIKELY(&goner == this)) {
+      // Compatibility with std::basic_string<>,
+      // C++11 21.4.2 [string.cons] / 23 requires self-move-assignment support.
+      return *this;
+    }
+    // No need of this anymore
+    this->~basic_fbstring();
+    // Move the goner into this. Use Storage rather than a hardcoded
+    // fbstring_core<E>, so that custom cores (e.g. dummy_fbstring_core)
+    // also work.
+    new(&store_) Storage(std::move(goner.store_));
+    return *this;
+  }
+
+#ifndef _LIBSTDCXX_FBSTRING
+  // Compatibility with std::string
+  basic_fbstring & operator=(const std::string & rhs) {
+    return assign(rhs.data(), rhs.size());
+  }
+
+  // Compatibility with std::string
+  std::string toStdString() const {
+    return std::string(data(), size());
+  }
+#else
+  // A lot of code in fbcode still uses this method, so keep it here for now.
+  const basic_fbstring& toStdString() const {
+    return *this;
+  }
+#endif
+
+  basic_fbstring& operator=(const value_type* s) {
+    return assign(s);
+  }
+
+  basic_fbstring& operator=(value_type c) {
+    if (empty()) {
+      store_.expand_noinit(1);
+    } else if (store_.isShared()) {
+      basic_fbstring(1, c).swap(*this);
+      return *this;
+    } else {
+      store_.shrink(size() - 1);
+    }
+    *store_.mutable_data() = c;
+    store_.writeTerminator();
+    return *this;
+  }
+
+  basic_fbstring& operator=(std::initializer_list<value_type> il) {
+    return assign(il.begin(), il.end());
+  }
+
+  // C++11 21.4.3 iterators:
+  iterator begin() { return store_.mutable_data(); }
+
+  const_iterator begin() const { return store_.data(); }
+
+  const_iterator cbegin() const { return begin(); }
+
+  iterator end() {
+    return store_.mutable_data() + store_.size();
+  }
+
+  const_iterator end() const {
+    return store_.data() + store_.size();
+  }
+
+  const_iterator cend() const { return end(); }
+
+  reverse_iterator rbegin() {
+    return reverse_iterator(end());
+  }
+
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+
+  const_reverse_iterator crbegin() const { return rbegin(); }
+
+  reverse_iterator rend() {
+    return reverse_iterator(begin());
+  }
+
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  const_reverse_iterator crend() const { return rend(); }
+
+  // Added by C++11
+  // C++11 21.4.5, element access:
+  const value_type& front() const { return *begin(); }
+  const value_type& back() const {
+    assert(!empty());
+    // Should be begin()[size() - 1], but that branches twice
+    return *(end() - 1);
+  }
+  value_type& front() { return *begin(); }
+  value_type& back() {
+    assert(!empty());
+    // Should be begin()[size() - 1], but that branches twice
+    return *(end() - 1);
+  }
+  void pop_back() {
+    assert(!empty());
+    store_.shrink(1);
+  }
+
+  // C++11 21.4.4 capacity:
+  size_type size() const { return store_.size(); }
+
+  size_type length() const { return size(); }
+
+  size_type max_size() const {
+    return std::numeric_limits<size_type>::max();
+  }
+
+  void resize(const size_type n, const value_type c = value_type()) {
+    auto size = this->size();
+    if (n <= size) {
+      store_.shrink(size - n);
+    } else {
+      // Do this in two steps to minimize slack memory copied (see
+      // smartRealloc).
+      auto const capacity = this->capacity();
+      assert(capacity >= size);
+      if (size < capacity) {
+        auto delta = std::min(n, capacity) - size;
+        store_.expand_noinit(delta);
+        fbstring_detail::pod_fill(begin() + size, end(), c);
+        size += delta;
+        if (size == n) {
+          store_.writeTerminator();
+          return;
+        }
+        assert(size < n);
+      }
+      auto const delta = n - size;
+      store_.expand_noinit(delta);
+      fbstring_detail::pod_fill(end() - delta, end(), c);
+      store_.writeTerminator();
+    }
+    assert(this->size() == n);
+  }
+
+  size_type capacity() const { return store_.capacity(); }
+
+  void reserve(size_type res_arg = 0) {
+    enforce(res_arg <= max_size(), std::__throw_length_error, "");
+    store_.reserve(res_arg);
+  }
+
+  void shrink_to_fit() {
+    // Shrink only if slack memory is sufficiently large
+    if (capacity() < size() * 3 / 2) {
+      return;
+    }
+    basic_fbstring(cbegin(), cend()).swap(*this);
+  }
+
+  void clear() { resize(0); }
+
+  bool empty() const { return size() == 0; }
+
+  // C++11 21.4.5 element access:
+  const_reference operator[](size_type pos) const {
+    return *(begin() + pos);
+  }
+
+  reference operator[](size_type pos) {
+    return *(begin() + pos);
+  }
+
+  const_reference at(size_type n) const {
+    enforce(n < size(), std::__throw_out_of_range, "");
+    return (*this)[n];
+  }
+
+  reference at(size_type n) {
+    enforce(n < size(), std::__throw_out_of_range, "");
+    return (*this)[n];
+  }
+
+  // C++11 21.4.6 modifiers:
+  basic_fbstring& operator+=(const basic_fbstring& str) {
+    return append(str);
+  }
+
+  basic_fbstring& operator+=(const value_type* s) {
+    return append(s);
+  }
+
+  basic_fbstring& operator+=(const value_type c) {
+    push_back(c);
+    return *this;
+  }
+
+  basic_fbstring& operator+=(std::initializer_list<value_type> il) {
+    append(il);
+    return *this;
+  }
+
+  basic_fbstring& append(const basic_fbstring& str) {
+#ifndef NDEBUG
+    auto desiredSize = size() + str.size();
+#endif
+    append(str.data(), str.size());
+    assert(size() == desiredSize);
+    return *this;
+  }
+
+  basic_fbstring& append(const basic_fbstring& str, const size_type pos,
+                         size_type n) {
+    const size_type sz = str.size();
+    enforce(pos <= sz, std::__throw_out_of_range, "");
+    procrustes(n, sz - pos);
+    return append(str.data() + pos, n);
+  }
+
+  basic_fbstring& append(const value_type* s, size_type n) {
+#ifndef NDEBUG
+    Invariant checker(*this);
+    (void) checker;
+#endif
+    if (FBSTRING_UNLIKELY(!n)) {
+      // Unlikely but must be done
+      return *this;
+    }
+    auto const oldSize = size();
+    auto const oldData = data();
+    // Check for aliasing (rare). We could use "<=" here but in theory
+    // those do not work for pointers unless the pointers point to
+    // elements in the same array. For that reason we use
+    // std::less_equal, which is guaranteed to offer a total order
+    // over pointers. See discussion at http://goo.gl/Cy2ya for more
+    // info.
+    std::less_equal<const value_type*> le;
+    if (FBSTRING_UNLIKELY(le(oldData, s) && !le(oldData + oldSize, s))) {
+      assert(le(s + n, oldData + oldSize));
+      const size_type offset = s - oldData;
+      store_.reserve(oldSize + n);
+      // Restore the source
+      s = data() + offset;
+    }
+    // Warning! Repeated appends with short strings may actually incur
+    // practically quadratic performance. Avoid that by pushing back
+    // the first character (which ensures exponential growth) and then
+    // appending the rest normally. Worst case the append may incur a
+    // second allocation but that will be rare.
+    push_back(*s++);
+    --n;
+    memcpy(store_.expand_noinit(n), s, n * sizeof(value_type));
+    assert(size() == oldSize + n + 1);
+    return *this;
+  }
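+
+  // Editorial example of the aliasing case handled above; the source
+  // may point into this very string, so without the offset fix-up a
+  // reallocation inside reserve() would leave s dangling:
+  //
+  //   basic_fbstring<char> str("abcdef");
+  //   str.append(str.data() + 1, 3);   // source aliases str's buffer
+  //   // str == "abcdefbcd"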
+
+  basic_fbstring& append(const value_type* s) {
+    return append(s, traits_type::length(s));
+  }
+
+  basic_fbstring& append(size_type n, value_type c) {
+    resize(size() + n, c);
+    return *this;
+  }
+
+  template<class InputIterator>
+  basic_fbstring& append(InputIterator first, InputIterator last) {
+    insert(end(), first, last);
+    return *this;
+  }
+
+  basic_fbstring& append(std::initializer_list<value_type> il) {
+    return append(il.begin(), il.end());
+  }
+
+  void push_back(const value_type c) {             // primitive
+    store_.push_back(c);
+  }
+
+  basic_fbstring& assign(const basic_fbstring& str) {
+    if (&str == this) return *this;
+    return assign(str.data(), str.size());
+  }
+
+  basic_fbstring& assign(basic_fbstring&& str) {
+    return *this = std::move(str);
+  }
+
+  basic_fbstring& assign(const basic_fbstring& str, const size_type pos,
+                         size_type n) {
+    const size_type sz = str.size();
+    enforce(pos <= sz, std::__throw_out_of_range, "");
+    procrustes(n, sz - pos);
+    return assign(str.data() + pos, n);
+  }
+
+  basic_fbstring& assign(const value_type* s, const size_type n) {
+    Invariant checker(*this);
+    (void) checker;
+    if (size() >= n) {
+      std::copy(s, s + n, begin());
+      resize(n);
+      assert(size() == n);
+    } else {
+      const value_type *const s2 = s + size();
+      std::copy(s, s2, begin());
+      append(s2, n - size());
+      assert(size() == n);
+    }
+    store_.writeTerminator();
+    assert(size() == n);
+    return *this;
+  }
+
+  basic_fbstring& assign(const value_type* s) {
+    return assign(s, traits_type::length(s));
+  }
+
+  basic_fbstring& assign(std::initializer_list<value_type> il) {
+    return assign(il.begin(), il.end());
+  }
+
+  template <class ItOrLength, class ItOrChar>
+  basic_fbstring& assign(ItOrLength first_or_n, ItOrChar last_or_c) {
+    return replace(begin(), end(), first_or_n, last_or_c);
+  }
+
+  basic_fbstring& insert(size_type pos1, const basic_fbstring& str) {
+    return insert(pos1, str.data(), str.size());
+  }
+
+  basic_fbstring& insert(size_type pos1, const basic_fbstring& str,
+                         size_type pos2, size_type n) {
+    enforce(pos2 <= str.length(), std::__throw_out_of_range, "");
+    procrustes(n, str.length() - pos2);
+    return insert(pos1, str.data() + pos2, n);
+  }
+
+  basic_fbstring& insert(size_type pos, const value_type* s, size_type n) {
+    enforce(pos <= length(), std::__throw_out_of_range, "");
+    insert(begin() + pos, s, s + n);
+    return *this;
+  }
+
+  basic_fbstring& insert(size_type pos, const value_type* s) {
+    return insert(pos, s, traits_type::length(s));
+  }
+
+  basic_fbstring& insert(size_type pos, size_type n, value_type c) {
+    enforce(pos <= length(), std::__throw_out_of_range, "");
+    insert(begin() + pos, n, c);
+    return *this;
+  }
+
+  iterator insert(const_iterator p, const value_type c) {
+    const size_type pos = p - begin();
+    insert(p, 1, c);
+    return begin() + pos;
+  }
+
+private:
+  template <int i> class Selector {};
+
+  iterator insertImplDiscr(const_iterator p,
+                           size_type n, value_type c, Selector<1>) {
+    Invariant checker(*this);
+    (void) checker;
+    auto const pos = p - begin();
+    assert(p >= begin() && p <= end());
+    if (capacity() - size() < n) {
+      const size_type sz = p - begin();
+      reserve(size() + n);
+      p = begin() + sz;
+    }
+    const iterator oldEnd = end();
+    if (n < size_type(oldEnd - p)) {
+      append(oldEnd - n, oldEnd);
+      //std::copy(
+      //    reverse_iterator(oldEnd - n),
+      //    reverse_iterator(p),
+      //    reverse_iterator(oldEnd));
+      fbstring_detail::pod_move(&*p, &*oldEnd - n,
+                                begin() + pos + n);
+      std::fill(begin() + pos, begin() + pos + n, c);
+    } else {
+      append(n - (end() - p), c);
+      append(iterator(p), oldEnd);
+      std::fill(iterator(p), oldEnd, c);
+    }
+    store_.writeTerminator();
+    return begin() + pos;
+  }
+
+  template<class InputIter>
+  iterator insertImplDiscr(const_iterator i,
+                           InputIter b, InputIter e, Selector<0>) {
+    return insertImpl(i, b, e,
+               typename std::iterator_traits<InputIter>::iterator_category());
+  }
+
+  template <class FwdIterator>
+  iterator insertImpl(const_iterator i,
+                  FwdIterator s1, FwdIterator s2, std::forward_iterator_tag) {
+    Invariant checker(*this);
+    (void) checker;
+    const size_type pos = i - begin();
+    const typename std::iterator_traits<FwdIterator>::difference_type n2 =
+      std::distance(s1, s2);
+    assert(n2 >= 0);
+    using namespace fbstring_detail;
+    assert(pos <= size());
+
+    const typename std::iterator_traits<FwdIterator>::difference_type maxn2 =
+      capacity() - size();
+    if (maxn2 < n2) {
+      // realloc the string
+      reserve(size() + n2);
+      i = begin() + pos;
+    }
+    if (pos + n2 <= size()) {
+      const iterator tailBegin = end() - n2;
+      store_.expand_noinit(n2);
+      fbstring_detail::pod_copy(tailBegin, tailBegin + n2, end() - n2);
+      std::copy(const_reverse_iterator(tailBegin), const_reverse_iterator(i),
+                reverse_iterator(tailBegin + n2));
+      std::copy(s1, s2, begin() + pos);
+    } else {
+      FwdIterator t = s1;
+      const size_type old_size = size();
+      std::advance(t, old_size - pos);
+      const size_t newElems = std::distance(t, s2);
+      store_.expand_noinit(n2);
+      std::copy(t, s2, begin() + old_size);
+      fbstring_detail::pod_copy(data() + pos, data() + old_size,
+                                 begin() + old_size + newElems);
+      std::copy(s1, t, begin() + pos);
+    }
+    store_.writeTerminator();
+    return begin() + pos;
+  }
+
+  template <class InputIterator>
+  iterator insertImpl(const_iterator i,
+                      InputIterator b, InputIterator e,
+                      std::input_iterator_tag) {
+    const auto pos = i - begin();
+    basic_fbstring temp(begin(), i);
+    for (; b != e; ++b) {
+      temp.push_back(*b);
+    }
+    temp.append(i, cend());
+    swap(temp);
+    return begin() + pos;
+  }
+
+public:
+  template <class ItOrLength, class ItOrChar>
+  iterator insert(const_iterator p, ItOrLength first_or_n, ItOrChar last_or_c) {
+    Selector<std::numeric_limits<ItOrLength>::is_specialized> sel;
+    return insertImplDiscr(p, first_or_n, last_or_c, sel);
+  }
+
+  iterator insert(const_iterator p, std::initializer_list<value_type> il) {
+    return insert(p, il.begin(), il.end());
+  }
+
+  basic_fbstring& erase(size_type pos = 0, size_type n = npos) {
+    Invariant checker(*this);
+    (void) checker;
+    enforce(pos <= length(), std::__throw_out_of_range, "");
+    procrustes(n, length() - pos);
+    std::copy(begin() + pos + n, end(), begin() + pos);
+    resize(length() - n);
+    return *this;
+  }
+
+  iterator erase(iterator position) {
+    const size_type pos(position - begin());
+    enforce(pos <= size(), std::__throw_out_of_range, "");
+    erase(pos, 1);
+    return begin() + pos;
+  }
+
+  iterator erase(iterator first, iterator last) {
+    const size_type pos(first - begin());
+    erase(pos, last - first);
+    return begin() + pos;
+  }
+
+  // Replaces at most n1 chars of *this, starting with pos1 with the
+  // content of str
+  basic_fbstring& replace(size_type pos1, size_type n1,
+                          const basic_fbstring& str) {
+    return replace(pos1, n1, str.data(), str.size());
+  }
+
+  // Replaces at most n1 chars of *this, starting with pos1,
+  // with at most n2 chars of str starting with pos2
+  basic_fbstring& replace(size_type pos1, size_type n1,
+                          const basic_fbstring& str,
+                          size_type pos2, size_type n2) {
+    enforce(pos2 <= str.length(), std::__throw_out_of_range, "");
+    return replace(pos1, n1, str.data() + pos2,
+                   std::min(n2, str.size() - pos2));
+  }
+
+  // Replaces at most n1 chars of *this, starting with pos, with chars from s
+  basic_fbstring& replace(size_type pos, size_type n1, const value_type* s) {
+    return replace(pos, n1, s, traits_type::length(s));
+  }
+
+  // Replaces at most n1 chars of *this, starting with pos, with n2
+  // occurrences of c
+  //
+  // consolidated with
+  //
+  // Replaces at most n1 chars of *this, starting with pos, with at
+  // most n2 chars of str.  str must have at least n2 chars.
+  template <class StrOrLength, class NumOrChar>
+  basic_fbstring& replace(size_type pos, size_type n1,
+                          StrOrLength s_or_n2, NumOrChar n_or_c) {
+    Invariant checker(*this);
+    (void) checker;
+    enforce(pos <= size(), std::__throw_out_of_range, "");
+    procrustes(n1, length() - pos);
+    const iterator b = begin() + pos;
+    return replace(b, b + n1, s_or_n2, n_or_c);
+  }
+
+  basic_fbstring& replace(iterator i1, iterator i2, const basic_fbstring& str) {
+    return replace(i1, i2, str.data(), str.length());
+  }
+
+  basic_fbstring& replace(iterator i1, iterator i2, const value_type* s) {
+    return replace(i1, i2, s, traits_type::length(s));
+  }
+
+private:
+  basic_fbstring& replaceImplDiscr(iterator i1, iterator i2,
+                                   const value_type* s, size_type n,
+                                   Selector<2>) {
+    assert(i1 <= i2);
+    assert(begin() <= i1 && i1 <= end());
+    assert(begin() <= i2 && i2 <= end());
+    return replace(i1, i2, s, s + n);
+  }
+
+  basic_fbstring& replaceImplDiscr(iterator i1, iterator i2,
+                                   size_type n2, value_type c, Selector<1>) {
+    const size_type n1 = i2 - i1;
+    if (n1 > n2) {
+      std::fill(i1, i1 + n2, c);
+      erase(i1 + n2, i2);
+    } else {
+      std::fill(i1, i2, c);
+      insert(i2, n2 - n1, c);
+    }
+    assert(isSane());
+    return *this;
+  }
+
+  template <class InputIter>
+  basic_fbstring& replaceImplDiscr(iterator i1, iterator i2,
+                                   InputIter b, InputIter e,
+                                   Selector<0>) {
+    replaceImpl(i1, i2, b, e,
+                typename std::iterator_traits<InputIter>::iterator_category());
+    return *this;
+  }
+
+private:
+  template <class FwdIterator>
+  bool replaceAliased(iterator i1, iterator i2,
+                      FwdIterator s1, FwdIterator s2, std::false_type) {
+    return false;
+  }
+
+  template <class FwdIterator>
+  bool replaceAliased(iterator i1, iterator i2,
+                      FwdIterator s1, FwdIterator s2, std::true_type) {
+    static const std::less_equal<const value_type*> le =
+      std::less_equal<const value_type*>();
+    const bool aliased = le(&*begin(), &*s1) && le(&*s1, &*end());
+    if (!aliased) {
+      return false;
+    }
+    // Aliased replace, copy to new string
+    basic_fbstring temp;
+    temp.reserve(size() - (i2 - i1) + std::distance(s1, s2));
+    temp.append(begin(), i1).append(s1, s2).append(i2, end());
+    swap(temp);
+    return true;
+  }
+
+  template <class FwdIterator>
+  void replaceImpl(iterator i1, iterator i2,
+                   FwdIterator s1, FwdIterator s2, std::forward_iterator_tag) {
+    Invariant checker(*this);
+    (void) checker;
+
+    // Handle aliased replace
+    if (replaceAliased(i1, i2, s1, s2,
+          std::integral_constant<bool,
+            std::is_same<FwdIterator, iterator>::value ||
+            std::is_same<FwdIterator, const_iterator>::value>())) {
+      return;
+    }
+
+    auto const n1 = i2 - i1;
+    assert(n1 >= 0);
+    auto const n2 = std::distance(s1, s2);
+    assert(n2 >= 0);
+
+    if (n1 > n2) {
+      // shrinks
+      std::copy(s1, s2, i1);
+      erase(i1 + n2, i2);
+    } else {
+      // grows
+      fbstring_detail::copy_n(s1, n1, i1);
+      std::advance(s1, n1);
+      insert(i2, s1, s2);
+    }
+    assert(isSane());
+  }
+
+  template <class InputIterator>
+  void replaceImpl(iterator i1, iterator i2,
+                   InputIterator b, InputIterator e, std::input_iterator_tag) {
+    basic_fbstring temp(begin(), i1);
+    temp.append(b, e).append(i2, end());
+    swap(temp);
+  }
+
+public:
+  template <class T1, class T2>
+  basic_fbstring& replace(iterator i1, iterator i2,
+                          T1 first_or_n_or_s, T2 last_or_c_or_n) {
+    const bool
+      num1 = std::numeric_limits<T1>::is_specialized,
+      num2 = std::numeric_limits<T2>::is_specialized;
+    return replaceImplDiscr(
+      i1, i2, first_or_n_or_s, last_or_c_or_n,
+      Selector<num1 ? (num2 ? 1 : -1) : (num2 ? 2 : 0)>());
+  }
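+
+  // Editorial note on the dispatch above: Selector distinguishes the
+  // overload families purely by whether std::numeric_limits is
+  // specialized for each argument type. With i1/i2 iterators into s,
+  // ptr a const value_type*, and first/last a non-numeric iterator
+  // pair (all hypothetical):
+  //
+  //   s.replace(i1, i2, 3, 'x');        // num, num   -> Selector<1>
+  //   s.replace(i1, i2, ptr, 3);        // ptr, num   -> Selector<2>
+  //   s.replace(i1, i2, first, last);   // iter, iter -> Selector<0>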
+
+  size_type copy(value_type* s, size_type n, size_type pos = 0) const {
+    enforce(pos <= size(), std::__throw_out_of_range, "");
+    procrustes(n, size() - pos);
+
+    fbstring_detail::pod_copy(
+      data() + pos,
+      data() + pos + n,
+      s);
+    return n;
+  }
+
+  void swap(basic_fbstring& rhs) {
+    store_.swap(rhs.store_);
+  }
+
+  const value_type* c_str() const {
+    return store_.c_str();
+  }
+
+  const value_type* data() const { return c_str(); }
+
+  allocator_type get_allocator() const {
+    return allocator_type();
+  }
+
+  size_type find(const basic_fbstring& str, size_type pos = 0) const {
+    return find(str.data(), pos, str.length());
+  }
+
+  size_type find(const value_type* needle, const size_type pos,
+                 const size_type nsize) const {
+    auto const size = this->size();
+    // An empty needle is found at pos, provided pos is inside the string.
+    if (!nsize) return pos <= size ? pos : npos;
+    // nsize + pos can overflow (e.g. pos == npos); guard against that by
+    // checking that nsize + pos does not wrap around.
+    if (nsize + pos > size || nsize + pos < pos) return npos;
+    // Don't use std::search, use a Boyer-Moore-like trick by comparing
+    // the last characters first
+    auto const haystack = data();
+    auto const nsize_1 = nsize - 1;
+    auto const lastNeedle = needle[nsize_1];
+
+    // Boyer-Moore skip value for the last char in the needle. Zero is
+    // not a valid value; skip will be computed the first time it's
+    // needed.
+    size_type skip = 0;
+
+    const E * i = haystack + pos;
+    auto iEnd = haystack + size - nsize_1;
+
+    while (i < iEnd) {
+      // Boyer-Moore: match the last element in the needle
+      while (i[nsize_1] != lastNeedle) {
+        if (++i == iEnd) {
+          // not found
+          return npos;
+        }
+      }
+      // Here we know that the last char matches
+      // Continue in pedestrian mode
+      for (size_t j = 0; ; ) {
+        assert(j < nsize);
+        if (i[j] != needle[j]) {
+          // Not found, we can skip
+          // Compute the skip value lazily
+          if (skip == 0) {
+            skip = 1;
+            while (skip <= nsize_1 && needle[nsize_1 - skip] != lastNeedle) {
+              ++skip;
+            }
+          }
+          i += skip;
+          break;
+        }
+        // Check if done searching
+        if (++j == nsize) {
+          // Yay
+          return i - haystack;
+        }
+      }
+    }
+    return npos;
+  }
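+
+  // Editorial example of the lazy skip above: searching "xbcabc" for
+  // needle "abc" (lastNeedle == 'c'). The window at offset 0 matches
+  // 'c' at its last position but mismatches at the front, so skip is
+  // computed once: no earlier 'c' occurs in the needle, hence
+  // skip == 3 and the window jumps straight to offset 3, where the
+  // match is found.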
+
+  size_type find(const value_type* s, size_type pos = 0) const {
+    return find(s, pos, traits_type::length(s));
+  }
+
+  size_type find(value_type c, size_type pos = 0) const {
+    return find(&c, pos, 1);
+  }
+
+  size_type rfind(const basic_fbstring& str, size_type pos = npos) const {
+    return rfind(str.data(), pos, str.length());
+  }
+
+  size_type rfind(const value_type* s, size_type pos, size_type n) const {
+    if (n > length()) return npos;
+    pos = std::min(pos, length() - n);
+    if (n == 0) return pos;
+
+    const_iterator i(begin() + pos);
+    for (; ; --i) {
+      if (traits_type::eq(*i, *s)
+          && traits_type::compare(&*i, s, n) == 0) {
+        return i - begin();
+      }
+      if (i == begin()) break;
+    }
+    return npos;
+  }
+
+  size_type rfind(const value_type* s, size_type pos = npos) const {
+    return rfind(s, pos, traits_type::length(s));
+  }
+
+  size_type rfind(value_type c, size_type pos = npos) const {
+    return rfind(&c, pos, 1);
+  }
+
+  size_type find_first_of(const basic_fbstring& str, size_type pos = 0) const {
+    return find_first_of(str.data(), pos, str.length());
+  }
+
+  size_type find_first_of(const value_type* s,
+                          size_type pos, size_type n) const {
+    if (pos > length() || n == 0) return npos;
+    const_iterator i(begin() + pos),
+      finish(end());
+    for (; i != finish; ++i) {
+      if (traits_type::find(s, n, *i) != 0) {
+        return i - begin();
+      }
+    }
+    return npos;
+  }
+
+  size_type find_first_of(const value_type* s, size_type pos = 0) const {
+    return find_first_of(s, pos, traits_type::length(s));
+  }
+
+  size_type find_first_of(value_type c, size_type pos = 0) const {
+    return find_first_of(&c, pos, 1);
+  }
+
+  size_type find_last_of (const basic_fbstring& str,
+                          size_type pos = npos) const {
+    return find_last_of(str.data(), pos, str.length());
+  }
+
+  size_type find_last_of (const value_type* s, size_type pos,
+                          size_type n) const {
+    if (!empty() && n > 0) {
+      pos = std::min(pos, length() - 1);
+      const_iterator i(begin() + pos);
+      for (;; --i) {
+        if (traits_type::find(s, n, *i) != 0) {
+          return i - begin();
+        }
+        if (i == begin()) break;
+      }
+    }
+    return npos;
+  }
+
+  size_type find_last_of (const value_type* s,
+                          size_type pos = npos) const {
+    return find_last_of(s, pos, traits_type::length(s));
+  }
+
+  size_type find_last_of (value_type c, size_type pos = npos) const {
+    return find_last_of(&c, pos, 1);
+  }
+
+  size_type find_first_not_of(const basic_fbstring& str,
+                              size_type pos = 0) const {
+    return find_first_not_of(str.data(), pos, str.size());
+  }
+
+  size_type find_first_not_of(const value_type* s, size_type pos,
+                              size_type n) const {
+    if (pos < length()) {
+      const_iterator
+        i(begin() + pos),
+        finish(end());
+      for (; i != finish; ++i) {
+        if (traits_type::find(s, n, *i) == 0) {
+          return i - begin();
+        }
+      }
+    }
+    return npos;
+  }
+
+  size_type find_first_not_of(const value_type* s,
+                              size_type pos = 0) const {
+    return find_first_not_of(s, pos, traits_type::length(s));
+  }
+
+  size_type find_first_not_of(value_type c, size_type pos = 0) const {
+    return find_first_not_of(&c, pos, 1);
+  }
+
+  size_type find_last_not_of(const basic_fbstring& str,
+                             size_type pos = npos) const {
+    return find_last_not_of(str.data(), pos, str.length());
+  }
+
+  size_type find_last_not_of(const value_type* s, size_type pos,
+                             size_type n) const {
+    if (!this->empty()) {
+      pos = std::min(pos, size() - 1);
+      const_iterator i(begin() + pos);
+      for (;; --i) {
+        if (traits_type::find(s, n, *i) == 0) {
+          return i - begin();
+        }
+        if (i == begin()) break;
+      }
+    }
+    return npos;
+  }
+
+  size_type find_last_not_of(const value_type* s,
+                             size_type pos = npos) const {
+    return find_last_not_of(s, pos, traits_type::length(s));
+  }
+
+  size_type find_last_not_of (value_type c, size_type pos = npos) const {
+    return find_last_not_of(&c, pos, 1);
+  }
+
+  basic_fbstring substr(size_type pos = 0, size_type n = npos) const& {
+    enforce(pos <= size(), std::__throw_out_of_range, "");
+    return basic_fbstring(data() + pos, std::min(n, size() - pos));
+  }
+
+  basic_fbstring substr(size_type pos = 0, size_type n = npos) && {
+    enforce(pos <= size(), std::__throw_out_of_range, "");
+    erase(0, pos);
+    if (n < size()) resize(n);
+    return std::move(*this);
+  }
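+
+  // Editorial usage note: the ref-qualified overloads above let an
+  // rvalue substr reuse the existing buffer instead of copying, e.g.
+  // (s being some basic_fbstring<char>):
+  //
+  //   auto tail = std::move(s).substr(2);   // s is consumed in place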
+
+  int compare(const basic_fbstring& str) const {
+    // FIX due to Goncalo N M de Carvalho July 18, 2005
+    return compare(0, size(), str);
+  }
+
+  int compare(size_type pos1, size_type n1,
+              const basic_fbstring& str) const {
+    return compare(pos1, n1, str.data(), str.size());
+  }
+
+  int compare(size_type pos1, size_type n1,
+              const value_type* s) const {
+    return compare(pos1, n1, s, traits_type::length(s));
+  }
+
+  int compare(size_type pos1, size_type n1,
+              const value_type* s, size_type n2) const {
+    enforce(pos1 <= size(), std::__throw_out_of_range, "");
+    procrustes(n1, size() - pos1);
+    // The line below fixed by Jean-Francois Bastien, 04-23-2007. Thanks!
+    const int r = traits_type::compare(pos1 + data(), s, std::min(n1, n2));
+    return r != 0 ? r : n1 > n2 ? 1 : n1 < n2 ? -1 : 0;
+  }
+
+  int compare(size_type pos1, size_type n1,
+              const basic_fbstring& str,
+              size_type pos2, size_type n2) const {
+    enforce(pos2 <= str.size(), std::__throw_out_of_range, "");
+    return compare(pos1, n1, str.data() + pos2,
+                   std::min(n2, str.size() - pos2));
+  }
+
+  // Code from Jean-Francois Bastien (03/26/2007)
+  int compare(const value_type* s) const {
+    // Could forward to compare(0, size(), s, traits_type::length(s))
+    // but that does two extra checks
+    const size_type n1(size()), n2(traits_type::length(s));
+    const int r = traits_type::compare(data(), s, std::min(n1, n2));
+    return r != 0 ? r : n1 > n2 ? 1 : n1 < n2 ? -1 : 0;
+  }
+
+private:
+  // Data
+  Storage store_;
+};
+
+// non-member functions
+// C++11 21.4.8.1/1
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(const basic_fbstring<E, T, A, S>& lhs,
+                                     const basic_fbstring<E, T, A, S>& rhs) {
+
+  basic_fbstring<E, T, A, S> result;
+  result.reserve(lhs.size() + rhs.size());
+  result.append(lhs).append(rhs);
+  // Plain return enables NRVO; std::move here would inhibit it.
+  return result;
+}
+
+// C++11 21.4.8.1/2
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(basic_fbstring<E, T, A, S>&& lhs,
+                                     const basic_fbstring<E, T, A, S>& rhs) {
+  return std::move(lhs.append(rhs));
+}
+
+// C++11 21.4.8.1/3
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(const basic_fbstring<E, T, A, S>& lhs,
+                                     basic_fbstring<E, T, A, S>&& rhs) {
+  if (rhs.capacity() >= lhs.size() + rhs.size()) {
+    // Good, at least we don't need to reallocate
+    return std::move(rhs.insert(0, lhs));
+  }
+  // Meh, no go. Forward to operator+(const&, const&).
+  auto const& rhsC = rhs;
+  return lhs + rhsC;
+}
+
+// C++11 21.4.8.1/4
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(basic_fbstring<E, T, A, S>&& lhs,
+                                     basic_fbstring<E, T, A, S>&& rhs) {
+  return std::move(lhs.append(rhs));
+}
+
+// C++11 21.4.8.1/5
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  const E* lhs,
+  const basic_fbstring<E, T, A, S>& rhs) {
+  //
+  basic_fbstring<E, T, A, S> result;
+  const auto len = basic_fbstring<E, T, A, S>::traits_type::length(lhs);
+  result.reserve(len + rhs.size());
+  result.append(lhs, len).append(rhs);
+  return result;
+}
+
+// C++11 21.4.8.1/6
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  const E* lhs,
+  basic_fbstring<E, T, A, S>&& rhs) {
+  //
+  const auto len = basic_fbstring<E, T, A, S>::traits_type::length(lhs);
+  if (rhs.capacity() >= len + rhs.size()) {
+    // Good, at least we don't need to reallocate
+    // This insert overload returns an iterator; returning it directly
+    // would bounce through the implicit const value_type* constructor
+    // and copy the whole string. Move the modified rhs out instead.
+    rhs.insert(rhs.begin(), lhs, lhs + len);
+    return std::move(rhs);
+  }
+  // Meh, no go. Do it by hand since we have len already.
+  basic_fbstring<E, T, A, S> result;
+  result.reserve(len + rhs.size());
+  result.append(lhs, len).append(rhs);
+  return result;
+}
+
+// C++11 21.4.8.1/7
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  E lhs,
+  const basic_fbstring<E, T, A, S>& rhs) {
+
+  basic_fbstring<E, T, A, S> result;
+  result.reserve(1 + rhs.size());
+  result.push_back(lhs);
+  result.append(rhs);
+  return result;
+}
+
+// C++11 21.4.8.1/8
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  E lhs,
+  basic_fbstring<E, T, A, S>&& rhs) {
+  //
+  if (rhs.capacity() > rhs.size()) {
+    // Good, at least we don't need to reallocate
+    // As in 21.4.8.1/6 above, this insert overload returns an iterator,
+    // not the string, so move the modified rhs out explicitly.
+    rhs.insert(rhs.begin(), lhs);
+    return std::move(rhs);
+  }
+  // Meh, no go. Forward to operator+(E, const&).
+  auto const& rhsC = rhs;
+  return lhs + rhsC;
+}
+
+// C++11 21.4.8.1/9
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  const basic_fbstring<E, T, A, S>& lhs,
+  const E* rhs) {
+
+  typedef typename basic_fbstring<E, T, A, S>::size_type size_type;
+  typedef typename basic_fbstring<E, T, A, S>::traits_type traits_type;
+
+  basic_fbstring<E, T, A, S> result;
+  const size_type len = traits_type::length(rhs);
+  result.reserve(lhs.size() + len);
+  result.append(lhs).append(rhs, len);
+  return result;
+}
+
+// C++11 21.4.8.1/10
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  basic_fbstring<E, T, A, S>&& lhs,
+  const E* rhs) {
+  //
+  return std::move(lhs += rhs);
+}
+
+// C++11 21.4.8.1/11
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  const basic_fbstring<E, T, A, S>& lhs,
+  E rhs) {
+
+  basic_fbstring<E, T, A, S> result;
+  result.reserve(lhs.size() + 1);
+  result.append(lhs);
+  result.push_back(rhs);
+  return result;
+}
+
+// C++11 21.4.8.1/12
+template <typename E, class T, class A, class S>
+inline
+basic_fbstring<E, T, A, S> operator+(
+  basic_fbstring<E, T, A, S>&& lhs,
+  E rhs) {
+  //
+  return std::move(lhs += rhs);
+}
+
+template <typename E, class T, class A, class S>
+inline
+bool operator==(const basic_fbstring<E, T, A, S>& lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return lhs.size() == rhs.size() && lhs.compare(rhs) == 0; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator==(const typename basic_fbstring<E, T, A, S>::value_type* lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return rhs == lhs; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator==(const basic_fbstring<E, T, A, S>& lhs,
+                const typename basic_fbstring<E, T, A, S>::value_type* rhs) {
+  return lhs.compare(rhs) == 0; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator!=(const basic_fbstring<E, T, A, S>& lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(lhs == rhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator!=(const typename basic_fbstring<E, T, A, S>::value_type* lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(lhs == rhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator!=(const basic_fbstring<E, T, A, S>& lhs,
+                const typename basic_fbstring<E, T, A, S>::value_type* rhs) {
+  return !(lhs == rhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator<(const basic_fbstring<E, T, A, S>& lhs,
+               const basic_fbstring<E, T, A, S>& rhs) {
+  return lhs.compare(rhs) < 0; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator<(const basic_fbstring<E, T, A, S>& lhs,
+               const typename basic_fbstring<E, T, A, S>::value_type* rhs) {
+  return lhs.compare(rhs) < 0; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator<(const typename basic_fbstring<E, T, A, S>::value_type* lhs,
+               const basic_fbstring<E, T, A, S>& rhs) {
+  return rhs.compare(lhs) > 0; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator>(const basic_fbstring<E, T, A, S>& lhs,
+               const basic_fbstring<E, T, A, S>& rhs) {
+  return rhs < lhs; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator>(const basic_fbstring<E, T, A, S>& lhs,
+               const typename basic_fbstring<E, T, A, S>::value_type* rhs) {
+  return rhs < lhs; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator>(const typename basic_fbstring<E, T, A, S>::value_type* lhs,
+               const basic_fbstring<E, T, A, S>& rhs) {
+  return rhs < lhs; }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator<=(const basic_fbstring<E, T, A, S>& lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(rhs < lhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator<=(const basic_fbstring<E, T, A, S>& lhs,
+                const typename basic_fbstring<E, T, A, S>::value_type* rhs) {
+  return !(rhs < lhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator<=(const typename basic_fbstring<E, T, A, S>::value_type* lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(rhs < lhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator>=(const basic_fbstring<E, T, A, S>& lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(lhs < rhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator>=(const basic_fbstring<E, T, A, S>& lhs,
+                const typename basic_fbstring<E, T, A, S>::value_type* rhs) {
+  return !(lhs < rhs); }
+
+template <typename E, class T, class A, class S>
+inline
+bool operator>=(const typename basic_fbstring<E, T, A, S>::value_type* lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(lhs < rhs); }
+
+// C++11 21.4.8.8
+template <typename E, class T, class A, class S>
+void swap(basic_fbstring<E, T, A, S>& lhs, basic_fbstring<E, T, A, S>& rhs) {
+  lhs.swap(rhs);
+}
+
+// TODO: make this faster.
+template <typename E, class T, class A, class S>
+inline
+std::basic_istream<
+  typename basic_fbstring<E, T, A, S>::value_type,
+  typename basic_fbstring<E, T, A, S>::traits_type>&
+  operator>>(
+    std::basic_istream<typename basic_fbstring<E, T, A, S>::value_type,
+    typename basic_fbstring<E, T, A, S>::traits_type>& is,
+    basic_fbstring<E, T, A, S>& str) {
+  typename std::basic_istream<E, T>::sentry sentry(is);
+  typedef std::basic_istream<typename basic_fbstring<E, T, A, S>::value_type,
+                             typename basic_fbstring<E, T, A, S>::traits_type>
+                        __istream_type;
+  typedef typename __istream_type::ios_base __ios_base;
+  size_t extracted = 0;
+  auto err = __ios_base::goodbit;
+  if (sentry) {
+    auto n = is.width();
+    if (n <= 0) {
+      n = str.max_size();
+    }
+    str.erase();
+    for (auto got = is.rdbuf()->sgetc(); extracted != size_t(n); ++extracted) {
+      if (got == T::eof()) {
+        err |= __ios_base::eofbit;
+        is.width(0);
+        break;
+      }
+      if (isspace(got)) break;
+      str.push_back(got);
+      got = is.rdbuf()->snextc();
+    }
+  }
+  if (!extracted) {
+    err |= __ios_base::failbit;
+  }
+  if (err) {
+    is.setstate(err);
+  }
+  return is;
+}
+
+template <typename E, class T, class A, class S>
+inline
+std::basic_ostream<typename basic_fbstring<E, T, A, S>::value_type,
+                   typename basic_fbstring<E, T, A, S>::traits_type>&
+operator<<(
+  std::basic_ostream<typename basic_fbstring<E, T, A, S>::value_type,
+  typename basic_fbstring<E, T, A, S>::traits_type>& os,
+    const basic_fbstring<E, T, A, S>& str) {
+#if _LIBCPP_VERSION
+  typename std::basic_ostream<
+    typename basic_fbstring<E, T, A, S>::value_type,
+    typename basic_fbstring<E, T, A, S>::traits_type>::sentry __s(os);
+  if (__s) {
+    typedef std::ostreambuf_iterator<
+      typename basic_fbstring<E, T, A, S>::value_type,
+      typename basic_fbstring<E, T, A, S>::traits_type> _Ip;
+    size_t __len = str.size();
+    bool __left =
+      (os.flags() & std::ios_base::adjustfield) == std::ios_base::left;
+    if (__pad_and_output(_Ip(os),
+                         str.data(),
+                         __left ? str.data() + __len : str.data(),
+                         str.data() + __len,
+                         os,
+                         os.fill()).failed()) {
+      os.setstate(std::ios_base::badbit | std::ios_base::failbit);
+    }
+  }
+#elif defined(_MSC_VER)
+  // MSVC doesn't define __ostream_insert
+  os.write(str.data(), str.size());
+#else
+  std::__ostream_insert(os, str.data(), str.size());
+#endif
+  return os;
+}
+
+#ifndef _LIBSTDCXX_FBSTRING
+
+template <typename E, class T, class A, class S>
+inline
+std::basic_istream<typename basic_fbstring<E, T, A, S>::value_type,
+                   typename basic_fbstring<E, T, A, S>::traits_type>&
+getline(
+  std::basic_istream<typename basic_fbstring<E, T, A, S>::value_type,
+  typename basic_fbstring<E, T, A, S>::traits_type>& is,
+    basic_fbstring<E, T, A, S>& str,
+  typename basic_fbstring<E, T, A, S>::value_type delim) {
+  // Use the nonstandard getdelim()
+  char * buf = nullptr;
+  size_t size = 0;
+  for (;;) {
+    // This looks quadratic but it really depends on realloc
+    auto const newSize = size + 128;
+    buf = static_cast<char*>(checkedRealloc(buf, newSize));
+    is.getline(buf + size, newSize - size, delim);
+    if (is.bad() || is.eof() || !is.fail()) {
+      // done by either failure, end of file, or normal read
+      size += std::strlen(buf + size);
+      break;
+    }
+    // Here we have failed due to too short a buffer
+    // Minus one to discount the terminating '\0'
+    size = newSize - 1;
+    assert(buf[size] == 0);
+    // Clear the error so we can continue reading
+    is.clear();
+  }
+  basic_fbstring<E, T, A, S> result(buf, size, size + 1,
+                                    AcquireMallocatedString());
+  result.swap(str);
+  return is;
+}
+
+template <typename E, class T, class A, class S>
+inline
+std::basic_istream<typename basic_fbstring<E, T, A, S>::value_type,
+                   typename basic_fbstring<E, T, A, S>::traits_type>&
+getline(
+  std::basic_istream<typename basic_fbstring<E, T, A, S>::value_type,
+  typename basic_fbstring<E, T, A, S>::traits_type>& is,
+  basic_fbstring<E, T, A, S>& str) {
+  // Just forward to the version with a delimiter
+  return getline(is, str, '\n');
+}
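+
+// For illustration only (not part of the original header): these overloads
+// mirror std::getline, so the familiar line-reading idiom works unchanged:
+//
+//   folly::fbstring line;
+//   while (getline(std::cin, line)) { /* process line */ }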
+
+#endif
+
+template <typename E1, class T, class A, class S>
+const typename basic_fbstring<E1, T, A, S>::size_type
+basic_fbstring<E1, T, A, S>::npos =
+              static_cast<typename basic_fbstring<E1, T, A, S>::size_type>(-1);
+
+#ifndef _LIBSTDCXX_FBSTRING
+// basic_string compatibility routines
+
+template <typename E, class T, class A, class S>
+inline
+bool operator==(const basic_fbstring<E, T, A, S>& lhs,
+                const std::string& rhs) {
+  return lhs.compare(0, lhs.size(), rhs.data(), rhs.size()) == 0;
+}
+
+template <typename E, class T, class A, class S>
+inline
+bool operator==(const std::string& lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return rhs == lhs;
+}
+
+template <typename E, class T, class A, class S>
+inline
+bool operator!=(const basic_fbstring<E, T, A, S>& lhs,
+                const std::string& rhs) {
+  return !(lhs == rhs);
+}
+
+template <typename E, class T, class A, class S>
+inline
+bool operator!=(const std::string& lhs,
+                const basic_fbstring<E, T, A, S>& rhs) {
+  return !(lhs == rhs);
+}
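+
+// For illustration only: the compatibility overloads above allow mixed
+// comparisons between fbstring and std::string, e.g.
+//
+//   folly::fbstring a("abc");
+//   std::string b("abc");
+//   assert(a == b && b == a && !(a != b));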
+
+#if !defined(_LIBSTDCXX_FBSTRING)
+typedef basic_fbstring<char> fbstring;
+#endif
+
+// fbstring is relocatable
+template <class T, class R, class A, class S>
+FOLLY_ASSUME_RELOCATABLE(basic_fbstring<T, R, A, S>);
+
+#else
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+
+} // namespace folly
+
+#ifndef _LIBSTDCXX_FBSTRING
+
+// Hash functions to make fbstring usable with e.g. hash_map
+//
+// Handle interaction with different C++ standard libraries, which
+// expect these types to be in different namespaces.
+
+#define FOLLY_FBSTRING_HASH1(T) \
+  template <> \
+  struct hash< ::folly::basic_fbstring<T> > { \
+    size_t operator()(const ::folly::basic_fbstring<T>& s) const { \
+      return ::folly::hash::fnv32_buf(s.data(), s.size() * sizeof(T)); \
+    } \
+  };
+
+// The C++11 standard says that these four are defined
+#define FOLLY_FBSTRING_HASH \
+  FOLLY_FBSTRING_HASH1(char) \
+  FOLLY_FBSTRING_HASH1(char16_t) \
+  FOLLY_FBSTRING_HASH1(char32_t) \
+  FOLLY_FBSTRING_HASH1(wchar_t)
+
+namespace std {
+
+FOLLY_FBSTRING_HASH
+
+}  // namespace std
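+
+// For illustration only: with the std::hash specializations in place,
+// fbstring can key the standard unordered containers directly, e.g.
+//
+//   std::unordered_map<folly::fbstring, int> counts;
+//   ++counts["hello"];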
+
+#if FOLLY_HAVE_DEPRECATED_ASSOC
+#if defined(_GLIBCXX_SYMVER) && !defined(__BIONIC__)
+namespace __gnu_cxx {
+
+FOLLY_FBSTRING_HASH
+
+}  // namespace __gnu_cxx
+#endif // _GLIBCXX_SYMVER && !__BIONIC__
+#endif // FOLLY_HAVE_DEPRECATED_ASSOC
+
+#undef FOLLY_FBSTRING_HASH
+#undef FOLLY_FBSTRING_HASH1
+
+#endif // _LIBSTDCXX_FBSTRING
+
+#pragma GCC diagnostic pop
+
+#undef FBSTRING_DISABLE_ADDRESS_SANITIZER
+#undef throw
+#undef FBSTRING_LIKELY
+#undef FBSTRING_UNLIKELY
+
+#endif // FOLLY_BASE_FBSTRING_H_
diff --git a/faux-folly/folly/FBVector.h b/faux-folly/folly/FBVector.h
new file mode 100644
index 0000000..22d7603
--- /dev/null
+++ b/faux-folly/folly/FBVector.h
@@ -0,0 +1,1646 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Nicholas Ormrod      (njormrod)
+ * Andrei Alexandrescu  (aalexandre)
+ *
+ * FBVector is Facebook's drop-in implementation of std::vector. It has special
+ * optimizations for use with relocatable types and jemalloc.
+ */
+
+#ifndef FOLLY_FBVECTOR_H
+#define FOLLY_FBVECTOR_H
+
+//=============================================================================
+// headers
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <memory>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+#include <folly/FormatTraits.h>
+#include <folly/Likely.h>
+#include <folly/Malloc.h>
+#include <folly/Traits.h>
+
+#include <boost/operators.hpp>
+
+//=============================================================================
+// forward declaration
+
+namespace folly {
+  template <class T, class Allocator = std::allocator<T>>
+  class fbvector;
+}
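+
+// For illustration only (not part of the original header): fbvector is a
+// drop-in replacement for std::vector, so typical usage is identical:
+//
+//   folly::fbvector<int> v;
+//   v.reserve(10);
+//   for (int i = 0; i < 10; ++i) {
+//     v.push_back(i * i);
+//   }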
+
+//=============================================================================
+// unrolling
+
+#define FOLLY_FBV_UNROLL_PTR(first, last, OP) do {  \
+  for (; (last) - (first) >= 4; (first) += 4) {     \
+    OP(((first) + 0));                              \
+    OP(((first) + 1));                              \
+    OP(((first) + 2));                              \
+    OP(((first) + 3));                              \
+  }                                                 \
+  for (; (first) != (last); ++(first)) OP((first)); \
+} while(0);
+
+//=============================================================================
+///////////////////////////////////////////////////////////////////////////////
+//                                                                           //
+//                              fbvector class                               //
+//                                                                           //
+///////////////////////////////////////////////////////////////////////////////
+
+namespace folly {
+
+template <class T, class Allocator>
+class fbvector : private boost::totally_ordered<fbvector<T, Allocator>> {
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // implementation
+private:
+
+  typedef std::allocator_traits<Allocator> A;
+
+  struct Impl : public Allocator {
+    // typedefs
+    typedef typename A::pointer pointer;
+    typedef typename A::size_type size_type;
+
+    // data
+    pointer b_, e_, z_;
+
+    // constructors
+    Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
+    /* implicit */ Impl(const Allocator& a)
+      : Allocator(a), b_(nullptr), e_(nullptr), z_(nullptr) {}
+    /* implicit */ Impl(Allocator&& a)
+      : Allocator(std::move(a)), b_(nullptr), e_(nullptr), z_(nullptr) {}
+
+    /* implicit */ Impl(size_type n, const Allocator& a = Allocator())
+      : Allocator(a)
+      { init(n); }
+
+    Impl(Impl&& other) noexcept
+      : Allocator(std::move(other)),
+        b_(other.b_), e_(other.e_), z_(other.z_)
+      { other.b_ = other.e_ = other.z_ = nullptr; }
+
+    // destructor
+    ~Impl() {
+      destroy();
+    }
+
+    // allocation
+    // note that 'allocate' and 'deallocate' are inherited from Allocator
+    T* D_allocate(size_type n) {
+      if (usingStdAllocator::value) {
+        return static_cast<T*>(malloc(n * sizeof(T)));
+      } else {
+        return std::allocator_traits<Allocator>::allocate(*this, n);
+      }
+    }
+
+    void D_deallocate(T* p, size_type n) noexcept {
+      if (usingStdAllocator::value) {
+        free(p);
+      } else {
+        std::allocator_traits<Allocator>::deallocate(*this, p, n);
+      }
+    }
+
+    // helpers
+    void swapData(Impl& other) {
+      std::swap(b_, other.b_);
+      std::swap(e_, other.e_);
+      std::swap(z_, other.z_);
+    }
+
+    // data ops
+    inline void destroy() noexcept {
+      if (b_) {
+        // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
+        // It has been inlined here for speed. It calls the static fbvector
+        //  methods to perform the actual destruction.
+        if (usingStdAllocator::value) {
+          S_destroy_range(b_, e_);
+        } else {
+          S_destroy_range_a(*this, b_, e_);
+        }
+
+        D_deallocate(b_, z_ - b_);
+      }
+    }
+
+    void init(size_type n) {
+      if (UNLIKELY(n == 0)) {
+        b_ = e_ = z_ = nullptr;
+      } else {
+        size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
+        b_ = D_allocate(sz);
+        e_ = b_;
+        z_ = b_ + sz;
+      }
+    }
+
+    void
+    set(pointer newB, size_type newSize, size_type newCap) {
+      z_ = newB + newCap;
+      e_ = newB + newSize;
+      b_ = newB;
+    }
+
+    void reset(size_type newCap) {
+      destroy();
+      try {
+        init(newCap);
+      } catch (...) {
+        init(0);
+        throw;
+      }
+    }
+    void reset() { // same as reset(0)
+      destroy();
+      b_ = e_ = z_ = nullptr;
+    }
+  } impl_;
+
+  static void swap(Impl& a, Impl& b) {
+    using std::swap;
+    if (!usingStdAllocator::value) swap<Allocator>(a, b);
+    a.swapData(b);
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // types and constants
+public:
+
+  typedef T                                           value_type;
+  typedef value_type&                                 reference;
+  typedef const value_type&                           const_reference;
+  typedef T*                                          iterator;
+  typedef const T*                                    const_iterator;
+  typedef size_t                                      size_type;
+  typedef typename std::make_signed<size_type>::type  difference_type;
+  typedef Allocator                                   allocator_type;
+  typedef typename A::pointer                         pointer;
+  typedef typename A::const_pointer                   const_pointer;
+  typedef std::reverse_iterator<iterator>             reverse_iterator;
+  typedef std::reverse_iterator<const_iterator>       const_reverse_iterator;
+
+private:
+
+  typedef std::integral_constant<bool,
+      boost::has_trivial_copy_constructor<T>::value &&
+      sizeof(T) <= 16 // don't force large structures to be passed by value
+    > should_pass_by_value;
+  typedef typename std::conditional<
+      should_pass_by_value::value, T, const T&>::type VT;
+  typedef typename std::conditional<
+      should_pass_by_value::value, T, T&&>::type MT;
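+
+  // For illustration: with T = int (trivially copyable, small), VT and MT
+  // are both plain int, so values are passed by copy; with T = std::string,
+  // VT is const std::string& and MT is std::string&&.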
+
+  typedef std::integral_constant<bool,
+      std::is_same<Allocator, std::allocator<T>>::value> usingStdAllocator;
+  typedef std::integral_constant<bool,
+      usingStdAllocator::value ||
+      A::propagate_on_container_move_assignment::value> moveIsSwap;
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // allocator helpers
+private:
+
+  //---------------------------------------------------------------------------
+  // allocate
+
+  T* M_allocate(size_type n) {
+    return impl_.D_allocate(n);
+  }
+
+  //---------------------------------------------------------------------------
+  // deallocate
+
+  void M_deallocate(T* p, size_type n) noexcept {
+    impl_.D_deallocate(p, n);
+  }
+
+  //---------------------------------------------------------------------------
+  // construct
+
+  // GCC is very sensitive to the exact way that construct is called. For
+  //  that reason there are several different specializations of construct.
+
+  template <typename U, typename... Args>
+  void M_construct(U* p, Args&&... args) {
+    if (usingStdAllocator::value) {
+      new (p) U(std::forward<Args>(args)...);
+    } else {
+      std::allocator_traits<Allocator>::construct(
+        impl_, p, std::forward<Args>(args)...);
+    }
+  }
+
+  template <typename U, typename... Args>
+  static void S_construct(U* p, Args&&... args) {
+    new (p) U(std::forward<Args>(args)...);
+  }
+
+  template <typename U, typename... Args>
+  static void S_construct_a(Allocator& a, U* p, Args&&... args) {
+    std::allocator_traits<Allocator>::construct(
+      a, p, std::forward<Args>(args)...);
+  }
+
+  // scalar optimization
+  // TODO we can expand this optimization to: default copyable and assignable
+  template <typename U, typename Enable = typename
+    std::enable_if<std::is_scalar<U>::value>::type>
+  void M_construct(U* p, U arg) {
+    if (usingStdAllocator::value) {
+      *p = arg;
+    } else {
+      std::allocator_traits<Allocator>::construct(impl_, p, arg);
+    }
+  }
+
+  template <typename U, typename Enable = typename
+    std::enable_if<std::is_scalar<U>::value>::type>
+  static void S_construct(U* p, U arg) {
+    *p = arg;
+  }
+
+  template <typename U, typename Enable = typename
+    std::enable_if<std::is_scalar<U>::value>::type>
+  static void S_construct_a(Allocator& a, U* p, U arg) {
+    std::allocator_traits<Allocator>::construct(a, p, arg);
+  }
+
+  // const& optimization
+  template <typename U, typename Enable = typename
+    std::enable_if<!std::is_scalar<U>::value>::type>
+  void M_construct(U* p, const U& value) {
+    if (usingStdAllocator::value) {
+      new (p) U(value);
+    } else {
+      std::allocator_traits<Allocator>::construct(impl_, p, value);
+    }
+  }
+
+  template <typename U, typename Enable = typename
+    std::enable_if<!std::is_scalar<U>::value>::type>
+  static void S_construct(U* p, const U& value) {
+    new (p) U(value);
+  }
+
+  template <typename U, typename Enable = typename
+    std::enable_if<!std::is_scalar<U>::value>::type>
+  static void S_construct_a(Allocator& a, U* p, const U& value) {
+    std::allocator_traits<Allocator>::construct(a, p, value);
+  }
+
+  //---------------------------------------------------------------------------
+  // destroy
+
+  void M_destroy(T* p) noexcept {
+    if (usingStdAllocator::value) {
+      if (!boost::has_trivial_destructor<T>::value) p->~T();
+    } else {
+      std::allocator_traits<Allocator>::destroy(impl_, p);
+    }
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // algorithmic helpers
+private:
+
+  //---------------------------------------------------------------------------
+  // destroy_range
+
+  // wrappers
+  void M_destroy_range_e(T* pos) noexcept {
+    D_destroy_range_a(pos, impl_.e_);
+    impl_.e_ = pos;
+  }
+
+  // dispatch
+  // THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
+  void D_destroy_range_a(T* first, T* last) noexcept {
+    if (usingStdAllocator::value) {
+      S_destroy_range(first, last);
+    } else {
+      S_destroy_range_a(impl_, first, last);
+    }
+  }
+
+  // allocator
+  static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
+    for (; first != last; ++first)
+      std::allocator_traits<Allocator>::destroy(a, first);
+  }
+
+  // optimized
+  static void S_destroy_range(T* first, T* last) noexcept {
+    if (!boost::has_trivial_destructor<T>::value) {
+      // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
+      //  size 0).
+      // The unrolled version seems to work faster for small to medium sized
+      //  fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
+      //  16.
+      // The simple loop version seems to work faster for large fbvectors. The
+      //  unrolled version is about 6% slower on fbvectors of size 16384.
+      // The two methods seem tied for very large fbvectors. The unrolled
+      //  version is about 0.5% slower on size 262144.
+
+      // for (; first != last; ++first) first->~T();
+      #define FOLLY_FBV_OP(p) (p)->~T()
+      FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
+      #undef FOLLY_FBV_OP
+    }
+  }
+
+  //---------------------------------------------------------------------------
+  // uninitialized_fill_n
+
+  // wrappers
+  void M_uninitialized_fill_n_e(size_type sz) {
+    D_uninitialized_fill_n_a(impl_.e_, sz);
+    impl_.e_ += sz;
+  }
+
+  void M_uninitialized_fill_n_e(size_type sz, VT value) {
+    D_uninitialized_fill_n_a(impl_.e_, sz, value);
+    impl_.e_ += sz;
+  }
+
+  // dispatch
+  void D_uninitialized_fill_n_a(T* dest, size_type sz) {
+    if (usingStdAllocator::value) {
+      S_uninitialized_fill_n(dest, sz);
+    } else {
+      S_uninitialized_fill_n_a(impl_, dest, sz);
+    }
+  }
+
+  void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
+    if (usingStdAllocator::value) {
+      S_uninitialized_fill_n(dest, sz, value);
+    } else {
+      S_uninitialized_fill_n_a(impl_, dest, sz, value);
+    }
+  }
+
+  // allocator
+  template <typename... Args>
+  static void S_uninitialized_fill_n_a(Allocator& a, T* dest,
+                                       size_type sz, Args&&... args) {
+    auto b = dest;
+    auto e = dest + sz;
+    try {
+      for (; b != e; ++b)
+        std::allocator_traits<Allocator>::construct(a, b,
+          std::forward<Args>(args)...);
+    } catch (...) {
+      S_destroy_range_a(a, dest, b);
+      throw;
+    }
+  }
+
+  // optimized
+  static void S_uninitialized_fill_n(T* dest, size_type n) {
+    if (folly::IsZeroInitializable<T>::value) {
+      std::memset(dest, 0, sizeof(T) * n);
+    } else {
+      auto b = dest;
+      auto e = dest + n;
+      try {
+        for (; b != e; ++b) S_construct(b);
+      } catch (...) {
+        --b;
+        for (; b >= dest; --b) b->~T();
+        throw;
+      }
+    }
+  }
+
+  static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) {
+    auto b = dest;
+    auto e = dest + n;
+    try {
+      for (; b != e; ++b) S_construct(b, value);
+    } catch (...) {
+      S_destroy_range(dest, b);
+      throw;
+    }
+  }
+
+  //---------------------------------------------------------------------------
+  // uninitialized_copy
+
+  // it is possible to add an optimization for the case where
+  // It = move(T*) and IsRelocatable<T> and IsZeroInitializable<T>
+
+  // wrappers
+  template <typename It>
+  void M_uninitialized_copy_e(It first, It last) {
+    D_uninitialized_copy_a(impl_.e_, first, last);
+    impl_.e_ += std::distance(first, last);
+  }
+
+  template <typename It>
+  void M_uninitialized_move_e(It first, It last) {
+    D_uninitialized_move_a(impl_.e_, first, last);
+    impl_.e_ += std::distance(first, last);
+  }
+
+  // dispatch
+  template <typename It>
+  void D_uninitialized_copy_a(T* dest, It first, It last) {
+    if (usingStdAllocator::value) {
+      if (folly::IsTriviallyCopyable<T>::value) {
+        S_uninitialized_copy_bits(dest, first, last);
+      } else {
+        S_uninitialized_copy(dest, first, last);
+      }
+    } else {
+      S_uninitialized_copy_a(impl_, dest, first, last);
+    }
+  }
+
+  template <typename It>
+  void D_uninitialized_move_a(T* dest, It first, It last) {
+    D_uninitialized_copy_a(dest,
+      std::make_move_iterator(first), std::make_move_iterator(last));
+  }
+
+  // allocator
+  template <typename It>
+  static void
+  S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
+    auto b = dest;
+    try {
+      for (; first != last; ++first, ++b)
+        std::allocator_traits<Allocator>::construct(a, b, *first);
+    } catch (...) {
+      S_destroy_range_a(a, dest, b);
+      throw;
+    }
+  }
+
+  // optimized
+  template <typename It>
+  static void S_uninitialized_copy(T* dest, It first, It last) {
+    auto b = dest;
+    try {
+      for (; first != last; ++first, ++b)
+        S_construct(b, *first);
+    } catch (...) {
+      S_destroy_range(dest, b);
+      throw;
+    }
+  }
+
+  static void
+  S_uninitialized_copy_bits(T* dest, const T* first, const T* last) {
+    std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
+  }
+
+  static void
+  S_uninitialized_copy_bits(T* dest, std::move_iterator<T*> first,
+                       std::move_iterator<T*> last) {
+    T* bFirst = first.base();
+    T* bLast = last.base();
+    std::memcpy((void*)dest, (void*)bFirst, (bLast - bFirst) * sizeof(T));
+  }
+
+  template <typename It>
+  static void
+  S_uninitialized_copy_bits(T* dest, It first, It last) {
+    S_uninitialized_copy(dest, first, last);
+  }
+
+  //---------------------------------------------------------------------------
+  // copy_n
+
+  // This function is "unsafe": it assumes that the iterator can be advanced at
+  //  least n times. However, as a private function, that unsafety is managed
+  //  wholly by fbvector itself.
+
+  template <typename It>
+  static It S_copy_n(T* dest, It first, size_type n) {
+    auto e = dest + n;
+    for (; dest != e; ++dest, ++first) *dest = *first;
+    return first;
+  }
+
+  static const T* S_copy_n(T* dest, const T* first, size_type n) {
+    if (folly::IsTriviallyCopyable<T>::value) {
+      std::memcpy((void*)dest, (void*)first, n * sizeof(T));
+      return first + n;
+    } else {
+      return S_copy_n<const T*>(dest, first, n);
+    }
+  }
+
+  static std::move_iterator<T*>
+  S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) {
+    if (folly::IsTriviallyCopyable<T>::value) {
+      T* first = mIt.base();
+      std::memcpy((void*)dest, (void*)first, n * sizeof(T));
+      return std::make_move_iterator(first + n);
+    } else {
+      return S_copy_n<std::move_iterator<T*>>(dest, mIt, n);
+    }
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // relocation helpers
+private:
+
+  // Relocation is divided into three parts:
+  //
+  //  1: relocate_move
+  //     Performs the actual movement of data from point a to point b.
+  //
+  //  2: relocate_done
+  //     Destroys the old data.
+  //
+  //  3: relocate_undo
+  //     Destroys the new data and restores the old data.
+  //
+  // The three steps are used because there may be an exception after part 1
+  //  has completed. If that is the case, then relocate_undo can nullify the
+  //  initial move. Otherwise, relocate_done performs the last bit of tidying
+  //  up.
+  //
+  // The relocation trio may use either memcpy, move, or copy. It is decided
+  //  by the following case statement:
+  //
+  //  IsRelocatable && usingStdAllocator    -> memcpy
+  //  has_nothrow_move && usingStdAllocator -> move
+  //  cannot copy                           -> move
+  //  default                               -> copy
+  //
+  // If the class is non-copyable then it must be movable. However, if the
+  //  move constructor is not noexcept, i.e. an error could be thrown, then
+  //  relocate_undo will be unable to restore the old data, for fear of a
+  //  second exception being thrown. This is a known and unavoidable
+  //  deficiency. In lieu of a strong exception guarantee, relocate_undo does
+  //  the next best thing: it provides a weak exception guarantee by
+  //  destroying the new data, but leaving the old data in an indeterminate
+  //  state. Note that the indeterminate state will be valid, since the
+  //  old data has not been destroyed; it has merely been the source of a
+  //  move, which is required to leave the source in a valid state.
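+  //
+  // For illustration, a minimal sketch of the trio in use (this is what
+  //  the M_relocate wrapper below does when no exception intervenes):
+  //
+  //   relocate_move(newB, impl_.b_, impl_.e_);  // step 1: move the data
+  //   relocate_done(newB, impl_.b_, impl_.e_);  // step 2: destroy old data
+  //
+  //  and, on an exception between the two steps:
+  //
+  //   relocate_undo(newB, impl_.b_, impl_.e_);  // step 3: roll back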
+
+  // wrappers
+  void M_relocate(T* newB) {
+    relocate_move(newB, impl_.b_, impl_.e_);
+    relocate_done(newB, impl_.b_, impl_.e_);
+  }
+
+  // dispatch type trait
+  typedef std::integral_constant<bool,
+      folly::IsRelocatable<T>::value && usingStdAllocator::value
+    > relocate_use_memcpy;
+
+  typedef std::integral_constant<bool,
+      (std::is_nothrow_move_constructible<T>::value
+       && usingStdAllocator::value)
+      || !std::is_copy_constructible<T>::value
+    > relocate_use_move;
+
+  // move
+  void relocate_move(T* dest, T* first, T* last) {
+    relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());
+  }
+
+  void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
+    std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
+  }
+
+  void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
+    relocate_move_or_copy(dest, first, last, relocate_use_move());
+  }
+
+  void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
+    D_uninitialized_move_a(dest, first, last);
+  }
+
+  void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
+    D_uninitialized_copy_a(dest, first, last);
+  }
+
+  // done
+  void relocate_done(T* /*dest*/, T* first, T* last) noexcept {
+    if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+      // used memcpy; data has been relocated, do not call destructor
+    } else {
+      D_destroy_range_a(first, last);
+    }
+  }
+
+  // undo
+  void relocate_undo(T* dest, T* first, T* last) noexcept {
+    if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+      // used memcpy, old data is still valid, nothing to do
+    } else if (std::is_nothrow_move_constructible<T>::value &&
+               usingStdAllocator::value) {
+      // noexcept move everything back, aka relocate_move
+      relocate_move(first, dest, dest + (last - first));
+    } else if (!std::is_copy_constructible<T>::value) {
+      // weak guarantee
+      D_destroy_range_a(dest, dest + (last - first));
+    } else {
+      // used copy, old data is still valid
+      D_destroy_range_a(dest, dest + (last - first));
+    }
+  }
+
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // construct/copy/destroy
+public:
+
+  fbvector() = default;
+
+  explicit fbvector(const Allocator& a) : impl_(a) {}
+
+  explicit fbvector(size_type n, const Allocator& a = Allocator())
+    : impl_(n, a)
+    { M_uninitialized_fill_n_e(n); }
+
+  fbvector(size_type n, VT value, const Allocator& a = Allocator())
+    : impl_(n, a)
+    { M_uninitialized_fill_n_e(n, value); }
+
+  template <class It, class Category = typename
+            std::iterator_traits<It>::iterator_category>
+  fbvector(It first, It last, const Allocator& a = Allocator())
+    : fbvector(first, last, a, Category()) {}
+
+  fbvector(const fbvector& other)
+    : impl_(other.size(), A::select_on_container_copy_construction(other.impl_))
+    { M_uninitialized_copy_e(other.begin(), other.end()); }
+
+  fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}
+
+  fbvector(const fbvector& other, const Allocator& a)
+    : fbvector(other.begin(), other.end(), a) {}
+
+  /* may throw */ fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
+    if (impl_ == other.impl_) {
+      impl_.swapData(other.impl_);
+    } else {
+      impl_.init(other.size());
+      M_uninitialized_move_e(other.begin(), other.end());
+    }
+  }
+
+  fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
+    : fbvector(il.begin(), il.end(), a) {}
+
+  ~fbvector() = default; // the cleanup occurs in impl_
+
+  fbvector& operator=(const fbvector& other) {
+    if (UNLIKELY(this == &other)) return *this;
+
+    if (!usingStdAllocator::value &&
+        A::propagate_on_container_copy_assignment::value) {
+      if (impl_ != other.impl_) {
+        // can't use other's different allocator to clean up self
+        impl_.reset();
+      }
+      (Allocator&)impl_ = (Allocator&)other.impl_;
+    }
+
+    assign(other.begin(), other.end());
+    return *this;
+  }
+
+  fbvector& operator=(fbvector&& other) {
+    if (UNLIKELY(this == &other)) return *this;
+    moveFrom(std::move(other), moveIsSwap());
+    return *this;
+  }
+
+  fbvector& operator=(std::initializer_list<T> il) {
+    assign(il.begin(), il.end());
+    return *this;
+  }
+
+  template <class It, class Category = typename
+            std::iterator_traits<It>::iterator_category>
+  void assign(It first, It last) {
+    assign(first, last, Category());
+  }
+
+  void assign(size_type n, VT value) {
+    if (n > capacity()) {
+      // Not enough space. Do not reserve in place, since we will
+      // discard the old values anyways.
+      if (dataIsInternalAndNotVT(value)) {
+        T copy(std::move(value));
+        impl_.reset(n);
+        M_uninitialized_fill_n_e(n, copy);
+      } else {
+        impl_.reset(n);
+        M_uninitialized_fill_n_e(n, value);
+      }
+    } else if (n <= size()) {
+      auto newE = impl_.b_ + n;
+      std::fill(impl_.b_, newE, value);
+      M_destroy_range_e(newE);
+    } else {
+      std::fill(impl_.b_, impl_.e_, value);
+      M_uninitialized_fill_n_e(n - size(), value);
+    }
+  }
+
+  void assign(std::initializer_list<T> il) {
+    assign(il.begin(), il.end());
+  }
+
+  allocator_type get_allocator() const noexcept {
+    return impl_;
+  }
+
+private:
+
+  // contract dispatch for iterator types in fbvector(It first, It last)
+  template <class ForwardIterator>
+  fbvector(ForwardIterator first, ForwardIterator last,
+           const Allocator& a, std::forward_iterator_tag)
+    : impl_(std::distance(first, last), a)
+    { M_uninitialized_copy_e(first, last); }
+
+  template <class InputIterator>
+  fbvector(InputIterator first, InputIterator last,
+           const Allocator& a, std::input_iterator_tag)
+    : impl_(a)
+    { for (; first != last; ++first) emplace_back(*first); }
+
+  // contract dispatch for allocator movement in operator=(fbvector&&)
+  void
+  moveFrom(fbvector&& other, std::true_type) {
+    swap(impl_, other.impl_);
+  }
+  void moveFrom(fbvector&& other, std::false_type) {
+    if (impl_ == other.impl_) {
+      impl_.swapData(other.impl_);
+    } else {
+      impl_.reset(other.size());
+      M_uninitialized_move_e(other.begin(), other.end());
+    }
+  }
+
+  // contract dispatch for iterator types in assign(It first, It last)
+  template <class ForwardIterator>
+  void assign(ForwardIterator first, ForwardIterator last,
+              std::forward_iterator_tag) {
+    const size_t newSize = std::distance(first, last);
+    if (newSize > capacity()) {
+      impl_.reset(newSize);
+      M_uninitialized_copy_e(first, last);
+    } else if (newSize <= size()) {
+      auto newEnd = std::copy(first, last, impl_.b_);
+      M_destroy_range_e(newEnd);
+    } else {
+      auto mid = S_copy_n(impl_.b_, first, size());
+      M_uninitialized_copy_e<decltype(last)>(mid, last);
+    }
+  }
+
+  template <class InputIterator>
+  void assign(InputIterator first, InputIterator last,
+              std::input_iterator_tag) {
+    auto p = impl_.b_;
+    for (; first != last && p != impl_.e_; ++first, ++p) {
+      *p = *first;
+    }
+    if (p != impl_.e_) {
+      M_destroy_range_e(p);
+    } else {
+      for (; first != last; ++first) emplace_back(*first);
+    }
+  }
+
+  // contract dispatch for aliasing under VT optimization
+  bool dataIsInternalAndNotVT(const T& t) {
+    if (should_pass_by_value::value) return false;
+    return dataIsInternal(t);
+  }
+  bool dataIsInternal(const T& t) {
+    return UNLIKELY(impl_.b_ <= std::addressof(t) &&
+                    std::addressof(t) < impl_.e_);
+  }
+
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // iterators
+public:
+
+  iterator begin() noexcept {
+    return impl_.b_;
+  }
+  const_iterator begin() const noexcept {
+    return impl_.b_;
+  }
+  iterator end() noexcept {
+    return impl_.e_;
+  }
+  const_iterator end() const noexcept {
+    return impl_.e_;
+  }
+  reverse_iterator rbegin() noexcept {
+    return reverse_iterator(end());
+  }
+  const_reverse_iterator rbegin() const noexcept {
+    return const_reverse_iterator(end());
+  }
+  reverse_iterator rend() noexcept {
+    return reverse_iterator(begin());
+  }
+  const_reverse_iterator rend() const noexcept {
+    return const_reverse_iterator(begin());
+  }
+
+  const_iterator cbegin() const noexcept {
+    return impl_.b_;
+  }
+  const_iterator cend() const noexcept {
+    return impl_.e_;
+  }
+  const_reverse_iterator crbegin() const noexcept {
+    return const_reverse_iterator(end());
+  }
+  const_reverse_iterator crend() const noexcept {
+    return const_reverse_iterator(begin());
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // capacity
+public:
+
+  size_type size() const noexcept {
+    return impl_.e_ - impl_.b_;
+  }
+
+  size_type max_size() const noexcept {
+    // good luck gettin' there
+    return ~size_type(0);
+  }
+
+  void resize(size_type n) {
+    if (n <= size()) {
+      M_destroy_range_e(impl_.b_ + n);
+    } else {
+      reserve(n);
+      M_uninitialized_fill_n_e(n - size());
+    }
+  }
+
+  void resize(size_type n, VT t) {
+    if (n <= size()) {
+      M_destroy_range_e(impl_.b_ + n);
+    } else if (dataIsInternalAndNotVT(t) && n > capacity()) {
+      T copy(t);
+      reserve(n);
+      M_uninitialized_fill_n_e(n - size(), copy);
+    } else {
+      reserve(n);
+      M_uninitialized_fill_n_e(n - size(), t);
+    }
+  }
+
+  size_type capacity() const noexcept {
+    return impl_.z_ - impl_.b_;
+  }
+
+  bool empty() const noexcept {
+    return impl_.b_ == impl_.e_;
+  }
+
+  void reserve(size_type n) {
+    if (n <= capacity()) return;
+    if (impl_.b_ && reserve_in_place(n)) return;
+
+    auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
+    auto newB = M_allocate(newCap);
+    try {
+      M_relocate(newB);
+    } catch (...) {
+      M_deallocate(newB, newCap);
+      throw;
+    }
+    if (impl_.b_)
+      M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
+    impl_.z_ = newB + newCap;
+    impl_.e_ = newB + (impl_.e_ - impl_.b_);
+    impl_.b_ = newB;
+  }
+
+  void shrink_to_fit() noexcept {
+    auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
+    auto const newCap = newCapacityBytes / sizeof(T);
+    auto const oldCap = capacity();
+
+    if (newCap >= oldCap) return;
+
+    void* p = impl_.b_;
+    // xallocx() will shrink to precisely newCapacityBytes (which was generated
+    // by goodMallocSize()) if it successfully shrinks in place.
+    if ((usingJEMalloc() && usingStdAllocator::value) &&
+        newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
+        xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
+      impl_.z_ += newCap - oldCap;
+    } else {
+      T* newB; // intentionally uninitialized
+      try {
+        newB = M_allocate(newCap);
+        try {
+          M_relocate(newB);
+        } catch (...) {
+          M_deallocate(newB, newCap);
+          return; // swallow the error
+        }
+      } catch (...) {
+        return;
+      }
+      if (impl_.b_)
+        M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
+      impl_.z_ = newB + newCap;
+      impl_.e_ = newB + (impl_.e_ - impl_.b_);
+      impl_.b_ = newB;
+    }
+  }
+
+private:
+
+  bool reserve_in_place(size_type n) {
+    if (!usingStdAllocator::value || !usingJEMalloc()) return false;
+
+    // jemalloc can never grow in place blocks smaller than 4096 bytes.
+    if ((impl_.z_ - impl_.b_) * sizeof(T) <
+      folly::jemallocMinInPlaceExpandable) return false;
+
+    auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
+    void* p = impl_.b_;
+    if (xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
+      impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
+      return true;
+    }
+    return false;
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // element access
+public:
+
+  reference operator[](size_type n) {
+    assert(n < size());
+    return impl_.b_[n];
+  }
+  const_reference operator[](size_type n) const {
+    assert(n < size());
+    return impl_.b_[n];
+  }
+  const_reference at(size_type n) const {
+    if (UNLIKELY(n >= size())) {
+      throw std::out_of_range("fbvector: index is greater than size.");
+    }
+    return (*this)[n];
+  }
+  reference at(size_type n) {
+    auto const& cThis = *this;
+    return const_cast<reference>(cThis.at(n));
+  }
+  reference front() {
+    assert(!empty());
+    return *impl_.b_;
+  }
+  const_reference front() const {
+    assert(!empty());
+    return *impl_.b_;
+  }
+  reference back()  {
+    assert(!empty());
+    return impl_.e_[-1];
+  }
+  const_reference back() const {
+    assert(!empty());
+    return impl_.e_[-1];
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // data access
+public:
+
+  T* data() noexcept {
+    return impl_.b_;
+  }
+  const T* data() const noexcept {
+    return impl_.b_;
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // modifiers (common)
+public:
+
+  template <class... Args>
+  void emplace_back(Args&&... args)  {
+    if (impl_.e_ != impl_.z_) {
+      M_construct(impl_.e_, std::forward<Args>(args)...);
+      ++impl_.e_;
+    } else {
+      emplace_back_aux(std::forward<Args>(args)...);
+    }
+  }
+
+  void
+  push_back(const T& value) {
+    if (impl_.e_ != impl_.z_) {
+      M_construct(impl_.e_, value);
+      ++impl_.e_;
+    } else {
+      emplace_back_aux(value);
+    }
+  }
+
+  void
+  push_back(T&& value) {
+    if (impl_.e_ != impl_.z_) {
+      M_construct(impl_.e_, std::move(value));
+      ++impl_.e_;
+    } else {
+      emplace_back_aux(std::move(value));
+    }
+  }
+
+  void pop_back() {
+    assert(!empty());
+    --impl_.e_;
+    M_destroy(impl_.e_);
+  }
+
+  void swap(fbvector& other) noexcept {
+    if (!usingStdAllocator::value &&
+        A::propagate_on_container_swap::value)
+      swap(impl_, other.impl_);
+    else impl_.swapData(other.impl_);
+  }
+
+  void clear() noexcept {
+    M_destroy_range_e(impl_.b_);
+  }
+
+private:
+
+  // std::vector implements a similar function with a different growth
+  //  strategy: empty() ? 1 : capacity() * 2.
+  //
+  // fbvector grows differently on two counts:
+  //
+  // (1) initial size
+  //     Instead of growing to size 1 from empty, fbvector allocates at
+  //     least 64 bytes. You may still use reserve to reserve a lesser amount
+  //     of memory.
+  // (2) 1.5x
+  //     For medium-sized vectors, the growth strategy is 1.5x. See the docs
+  //     for details.
+  //     This does not apply to very small or very large fbvectors. This is a
+  //     heuristic.
+  //     A nice addition to fbvector would be the capability of having a user-
+  //     defined growth strategy, probably as part of the allocator.
+  //
+
+  size_type computePushBackCapacity() const {
+    if (capacity() == 0) {
+      return std::max(64 / sizeof(T), size_type(1));
+    }
+    if (capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)) {
+      return capacity() * 2;
+    }
+    if (capacity() > 4096 * 32 / sizeof(T)) {
+      return capacity() * 2;
+    }
+    return (capacity() * 3 + 1) / 2;
+  }
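+
+  // For illustration, assuming sizeof(T) == 4 and
+  //  folly::jemallocMinInPlaceExpandable == 4096 bytes: an empty fbvector
+  //  jumps straight to capacity 16 (64 bytes); capacities under 1024
+  //  elements double; capacities between 1024 and 32768 elements grow by
+  //  roughly 1.5x; anything larger doubles again.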
+
+  template <class... Args>
+  void emplace_back_aux(Args&&... args);
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // modifiers (erase)
+public:
+
+  iterator erase(const_iterator position) {
+    return erase(position, position + 1);
+  }
+
+  iterator erase(const_iterator first, const_iterator last) {
+    assert(isValid(first) && isValid(last));
+    assert(first <= last);
+    if (first != last) {
+      if (last == end()) {
+        M_destroy_range_e((iterator)first);
+      } else {
+        if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+          D_destroy_range_a((iterator)first, (iterator)last);
+          if (last - first >= cend() - last) {
+            std::memcpy((void*)first, (void*)last, (cend() - last) * sizeof(T));
+          } else {
+            std::memmove((iterator)first, last, (cend() - last) * sizeof(T));
+          }
+          impl_.e_ -= (last - first);
+        } else {
+          std::copy(std::make_move_iterator((iterator)last),
+                    std::make_move_iterator(end()), (iterator)first);
+          auto newEnd = impl_.e_ - std::distance(first, last);
+          M_destroy_range_e(newEnd);
+        }
+      }
+    }
+    return (iterator)first;
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // modifiers (insert)
+private: // we have the private section first because it defines some macros
+
+  bool isValid(const_iterator it) {
+    return cbegin() <= it && it <= cend();
+  }
+
+  size_type computeInsertCapacity(size_type n) {
+    size_type nc = std::max(computePushBackCapacity(), size() + n);
+    size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T);
+    return ac;
+  }
+
+  //---------------------------------------------------------------------------
+  //
+  // make_window takes an fbvector, and creates an uninitialized gap (a
+  //  window) at the given position, of the given size. The fbvector must
+  //  have enough capacity.
+  //
+  // Explanation by picture.
+  //
+  //    123456789______
+  //        ^
+  //        make_window here of size 3
+  //
+  //    1234___56789___
+  //
+  // If something goes wrong and the window must be destroyed, use
+  //  undo_window to provide a weak exception guarantee. It destroys
+  //  the right ledge.
+  //
+  //    1234___________
+  //
+  //---------------------------------------------------------------------------
+  //
+  // wrap_frame takes an inverse window and relocates an fbvector around it.
+  //  The fbvector must have at least as many elements as the left ledge.
+  //
+  // Explanation by picture.
+  //
+  //        START
+  //    fbvector:             inverse window:
+  //    123456789______       _____abcde_______
+  //                          [idx][ n ]
+  //
+  //        RESULT
+  //    _______________       12345abcde6789___
+  //
+  //---------------------------------------------------------------------------
+  //
+  // insert_use_fresh returns true iff the fbvector should use a fresh
+  //  block of memory for the insertion. If the fbvector does not have enough
+  //  spare capacity, then it must return true. Otherwise either true or false
+  //  may be returned.
+  //
+  //---------------------------------------------------------------------------
+  //
+  // These three functions, make_window, wrap_frame, and
+  //  insert_use_fresh, can be combined into a uniform interface.
+  // Since that interface involves a lot of case-work, it is built into
+  //  some macros: FOLLY_FBVECTOR_INSERT_(START|TRY|END)
+  // Macros are used in an attempt to let GCC perform better optimizations,
+  //  especially control flow optimization.
+  //
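+  //
+  // For illustration, emplace() below uses the macros like so:
+  //
+  //   FOLLY_FBVECTOR_INSERT_START(cpos, 1)  // open a window or fresh block
+  //     M_construct(start, ...);            // build the new element
+  //   FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)    // on throw: undo, rethrow
+  //     M_destroy(start);
+  //   FOLLY_FBVECTOR_INSERT_END(cpos, 1)    // publish and return iterator
+  //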
+
+  //---------------------------------------------------------------------------
+  // window
+
+  void make_window(iterator position, size_type n) {
+    assert(isValid(position));
+    assert(size() + n <= capacity());
+    assert(n != 0);
+
+    // The result is guaranteed to be non-negative, so use an unsigned type:
+    size_type tail = std::distance(position, impl_.e_);
+
+    if (tail <= n) {
+      relocate_move(position + n, position, impl_.e_);
+      relocate_done(position + n, position, impl_.e_);
+      impl_.e_ += n;
+    } else {
+      if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+        std::memmove(position + n, position, tail * sizeof(T));
+        impl_.e_ += n;
+      } else {
+        D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
+        try {
+          std::copy_backward(std::make_move_iterator(position),
+                             std::make_move_iterator(impl_.e_ - n), impl_.e_);
+        } catch (...) {
+          D_destroy_range_a(impl_.e_ - n, impl_.e_ + n);
+          impl_.e_ -= n;
+          throw;
+        }
+        impl_.e_ += n;
+        D_destroy_range_a(position, position + n);
+      }
+    }
+  }
+
+  void undo_window(iterator position, size_type n) noexcept {
+    D_destroy_range_a(position + n, impl_.e_);
+    impl_.e_ = position;
+  }
+
+  //---------------------------------------------------------------------------
+  // frame
+
+  void wrap_frame(T* ledge, size_type idx, size_type n) {
+    assert(size() >= idx);
+    assert(n != 0);
+
+    relocate_move(ledge, impl_.b_, impl_.b_ + idx);
+    try {
+      relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
+    } catch (...) {
+      relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
+      throw;
+    }
+    relocate_done(ledge, impl_.b_, impl_.b_ + idx);
+    relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
+  }
+
+  //---------------------------------------------------------------------------
+  // use fresh?
+
+  bool insert_use_fresh(const_iterator cposition, size_type n) {
+    if (cposition == cend()) {
+      if (size() + n <= capacity()) return false;
+      if (reserve_in_place(size() + n)) return false;
+      return true;
+    }
+
+    if (size() + n > capacity()) return true;
+
+    return false;
+  }
+
+  //---------------------------------------------------------------------------
+  // interface
+
+  #define FOLLY_FBVECTOR_INSERT_START(cpos, n)                                \
+    assert(isValid(cpos));                                                    \
+    T* position = const_cast<T*>(cpos);                                       \
+    size_type idx = std::distance(impl_.b_, position);                        \
+    bool fresh = insert_use_fresh(position, n);                               \
+    T* b;                                                                     \
+    size_type newCap = 0;                                                     \
+                                                                              \
+    if (fresh) {                                                              \
+      newCap = computeInsertCapacity(n);                                      \
+      b = M_allocate(newCap);                                                 \
+    } else {                                                                  \
+      make_window(position, n);                                               \
+      b = impl_.b_;                                                           \
+    }                                                                         \
+                                                                              \
+    T* start = b + idx;                                                       \
+                                                                              \
+    try {                                                                     \
+
+    // construct the inserted elements
+
+  #define FOLLY_FBVECTOR_INSERT_TRY(cpos, n)                                  \
+    } catch (...) {                                                           \
+      if (fresh) {                                                            \
+        M_deallocate(b, newCap);                                              \
+      } else {                                                                \
+        undo_window(position, n);                                             \
+      }                                                                       \
+      throw;                                                                  \
+    }                                                                         \
+                                                                              \
+    if (fresh) {                                                              \
+      try {                                                                   \
+        wrap_frame(b, idx, n);                                                \
+      } catch (...) {                                                         \
+
+
+    // delete the inserted elements (exception has been thrown)
+
+  #define FOLLY_FBVECTOR_INSERT_END(cpos, n)                                  \
+        M_deallocate(b, newCap);                                              \
+        throw;                                                                \
+      }                                                                       \
+      if (impl_.b_) M_deallocate(impl_.b_, capacity());                       \
+      impl_.set(b, size() + n, newCap);                                       \
+      return impl_.b_ + idx;                                                  \
+    } else {                                                                  \
+      return position;                                                        \
+    }                                                                         \
+
+  //---------------------------------------------------------------------------
+  // insert functions
+public:
+
+  template <class... Args>
+  iterator emplace(const_iterator cpos, Args&&... args) {
+    FOLLY_FBVECTOR_INSERT_START(cpos, 1)
+      M_construct(start, std::forward<Args>(args)...);
+    FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
+      M_destroy(start);
+    FOLLY_FBVECTOR_INSERT_END(cpos, 1)
+  }
+
+  iterator insert(const_iterator cpos, const T& value) {
+    if (dataIsInternal(value)) return insert(cpos, T(value));
+
+    FOLLY_FBVECTOR_INSERT_START(cpos, 1)
+      M_construct(start, value);
+    FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
+      M_destroy(start);
+    FOLLY_FBVECTOR_INSERT_END(cpos, 1)
+  }
+
+  iterator insert(const_iterator cpos, T&& value) {
+    if (dataIsInternal(value)) return insert(cpos, T(std::move(value)));
+
+    FOLLY_FBVECTOR_INSERT_START(cpos, 1)
+      M_construct(start, std::move(value));
+    FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
+      M_destroy(start);
+    FOLLY_FBVECTOR_INSERT_END(cpos, 1)
+  }
+
+  iterator insert(const_iterator cpos, size_type n, VT value) {
+    if (n == 0) return (iterator)cpos;
+    if (dataIsInternalAndNotVT(value)) return insert(cpos, n, T(value));
+
+    FOLLY_FBVECTOR_INSERT_START(cpos, n)
+      D_uninitialized_fill_n_a(start, n, value);
+    FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
+      D_destroy_range_a(start, start + n);
+    FOLLY_FBVECTOR_INSERT_END(cpos, n)
+  }
+
+  template <class It, class Category = typename
+            std::iterator_traits<It>::iterator_category>
+  iterator insert(const_iterator cpos, It first, It last) {
+    return insert(cpos, first, last, Category());
+  }
+
+  iterator insert(const_iterator cpos, std::initializer_list<T> il) {
+    return insert(cpos, il.begin(), il.end());
+  }
+
+  //---------------------------------------------------------------------------
+  // insert dispatch for iterator types
+private:
+
+  template <class FIt>
+  iterator insert(const_iterator cpos, FIt first, FIt last,
+                  std::forward_iterator_tag) {
+    size_type n = std::distance(first, last);
+    if (n == 0) return (iterator)cpos;
+
+    FOLLY_FBVECTOR_INSERT_START(cpos, n)
+      D_uninitialized_copy_a(start, first, last);
+    FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
+      D_destroy_range_a(start, start + n);
+    FOLLY_FBVECTOR_INSERT_END(cpos, n)
+  }
+
+  template <class IIt>
+  iterator insert(const_iterator cpos, IIt first, IIt last,
+                  std::input_iterator_tag) {
+    T* position = const_cast<T*>(cpos);
+    assert(isValid(position));
+    size_type idx = std::distance(begin(), position);
+
+    fbvector storage(std::make_move_iterator(position),
+                     std::make_move_iterator(end()),
+                     A::select_on_container_copy_construction(impl_));
+    M_destroy_range_e(position);
+    for (; first != last; ++first) emplace_back(*first);
+    insert(cend(), std::make_move_iterator(storage.begin()),
+           std::make_move_iterator(storage.end()));
+    return impl_.b_ + idx;
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // lexicographical functions (others from boost::totally_ordered superclass)
+public:
+
+  bool operator==(const fbvector& other) const {
+    return size() == other.size() && std::equal(begin(), end(), other.begin());
+  }
+
+  bool operator<(const fbvector& other) const {
+    return std::lexicographical_compare(
+      begin(), end(), other.begin(), other.end());
+  }
+
+  //===========================================================================
+  //---------------------------------------------------------------------------
+  // friends
+private:
+
+  template <class _T, class _A>
+  friend _T* relinquish(fbvector<_T, _A>&);
+
+  template <class _T, class _A>
+  friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap);
+
+}; // class fbvector
+
+
+//=============================================================================
+//-----------------------------------------------------------------------------
+// outlined functions (gcc, you finicky compiler you)
+
+template <typename T, typename Allocator>
+template <class... Args>
+void fbvector<T, Allocator>::emplace_back_aux(Args&&... args) {
+  size_type byte_sz = folly::goodMallocSize(
+    computePushBackCapacity() * sizeof(T));
+  if (usingStdAllocator::value
+      && usingJEMalloc()
+      && ((impl_.z_ - impl_.b_) * sizeof(T) >=
+          folly::jemallocMinInPlaceExpandable)) {
+    // Try to reserve in place.
+    // Ask xallocx to allocate in place at least size()+1 elements' worth of
+    //  space and at most byte_sz bytes. xallocx will allocate as much as
+    //  possible within that range, which is the best possible outcome: if
+    //  byte_sz bytes are available, take them all, otherwise take as much
+    //  as possible. If nothing is available, then fail.
+    // In this fashion, we never relocate if there is a possibility of
+    //  expanding in place, and we never reallocate by less than the desired
+    //  amount unless we cannot expand further. Hence we will not reallocate
+    //  sub-optimally twice in a row (modulo the blocking memory being freed).
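+    // For instance, with sizeof(T) == 8 and size() == 100, lower is
+    //  goodMallocSize(808) bytes; if computePushBackCapacity() returned 200,
+    //  upper is goodMallocSize(1600) bytes, and xallocx may settle anywhere
+    //  in between. (Illustrative numbers only; goodMallocSize rounds up to a
+    //  jemalloc size class.)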
+    size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T));
+    size_type upper = byte_sz;
+    size_type extra = upper - lower;
+
+    void* p = impl_.b_;
+    size_t actual;
+
+    if ((actual = xallocx(p, lower, extra, 0)) >= lower) {
+      impl_.z_ = impl_.b_ + actual / sizeof(T);
+      M_construct(impl_.e_, std::forward<Args>(args)...);
+      ++impl_.e_;
+      return;
+    }
+  }
+
+  // Reallocation failed. Perform a manual relocation.
+  size_type sz = byte_sz / sizeof(T);
+  auto newB = M_allocate(sz);
+  auto newE = newB + size();
+  try {
+    if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+      // For linear memory access, relocate before construction.
+      // By the test condition, relocate is noexcept.
+      // Note that there is no cleanup to do if M_construct throws - that's
+      //  one of the beauties of relocation.
+      // Benchmarks for this code have high variance, and the two branches
+      //  seem to perform about the same.
+      relocate_move(newB, impl_.b_, impl_.e_);
+      M_construct(newE, std::forward<Args>(args)...);
+      ++newE;
+    } else {
+      M_construct(newE, std::forward<Args>(args)...);
+      ++newE;
+      try {
+        M_relocate(newB);
+      } catch (...) {
+        M_destroy(newE - 1);
+        throw;
+      }
+    }
+  } catch (...) {
+    M_deallocate(newB, sz);
+    throw;
+  }
+  // Free the old buffer. The deallocation size must match the original
+  //  allocation (the old capacity), not the current size().
+  if (impl_.b_) M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
+  impl_.b_ = newB;
+  impl_.e_ = newE;
+  impl_.z_ = newB + sz;
+}
+
+//=============================================================================
+//-----------------------------------------------------------------------------
+// specialized functions
+
+template <class T, class A>
+void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
+  lhs.swap(rhs);
+}
+
+//=============================================================================
+//-----------------------------------------------------------------------------
+// other
+
+namespace detail {
+
+// Format support.
+template <class T, class A>
+struct IndexableTraits<fbvector<T, A>>
+  : public IndexableTraitsSeq<fbvector<T, A>> {
+};
+
+}  // namespace detail
+
+template <class T, class A>
+void compactResize(fbvector<T, A>* v, size_t sz) {
+  v->resize(sz);
+  v->shrink_to_fit();
+}
+
+// DANGER
+//
+// relinquish and attach are not member functions, specifically so that they
+//  are awkward to call. It is very easy to shoot yourself in the foot with
+//  these functions.
+//
+// If you call relinquish, then it is your responsibility to free the data
+//  and the storage, both of which may have been generated in a non-standard
+//  way through the fbvector's allocator.
+//
+// If you call attach, it is your responsibility to ensure that the fbvector
+//  is fresh (size and capacity both zero), and that the supplied data is
+//  capable of being manipulated by the allocator.
+// It is acceptable to supply a stack pointer IF:
+//  (1) The vector's data does not outlive the stack pointer. This includes
+//      extension of the data's life through a move operation.
+//  (2) The pointer has enough capacity that the vector will never be
+//      relocated.
+//  (3) Insert is not called on the vector; these functions have leeway to
+//      relocate the vector even if there is enough capacity.
+//  (4) A stack pointer is compatible with the fbvector's allocator.
+//
+
+template <class T, class A>
+T* relinquish(fbvector<T, A>& v) {
+  T* ret = v.data();
+  v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr;
+  return ret;
+}
+
+template <class T, class A>
+void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
+  assert(v.data() == nullptr);
+  v.impl_.b_ = data;
+  v.impl_.e_ = data + sz;
+  v.impl_.z_ = data + cap;
+}
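+
+// A minimal usage sketch (assuming the default std::allocator, under which
+//  fbvector's storage is malloc-backed and may be released with free()):
+//
+//   folly::fbvector<int> v = {1, 2, 3};
+//   int* p = relinquish(v);   // v is now empty; the caller owns the buffer
+//   // ... use p[0], p[1], p[2] ...
+//   free(p);
+//
+//   folly::fbvector<int> w;   // fresh: size() and capacity() are both zero
+//   attach(w, static_cast<int*>(malloc(4 * sizeof(int))), 0, 4);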
+
+} // namespace folly
+
+#endif // FOLLY_FBVECTOR_H
diff --git a/faux-folly/folly/File.cpp b/faux-folly/folly/File.cpp
new file mode 100644
index 0000000..3c53573
--- /dev/null
+++ b/faux-folly/folly/File.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/File.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/file.h>
+
+#include <folly/Exception.h>
+#include <folly/FileUtil.h>
+#include <folly/Format.h>
+#include <folly/ScopeGuard.h>
+
+#include <system_error>
+
+#include <glog/logging.h>
+
+namespace folly {
+
+File::File()
+  : fd_(-1)
+  , ownsFd_(false)
+{}
+
+File::File(int fd, bool ownsFd)
+  : fd_(fd)
+  , ownsFd_(ownsFd) {
+  CHECK_GE(fd, -1) << "fd must be -1 or non-negative";
+  CHECK(fd != -1 || !ownsFd) << "cannot own -1";
+}
+
+File::File(const char* name, int flags, mode_t mode)
+  : fd_(::open(name, flags, mode))
+  , ownsFd_(false) {
+  if (fd_ == -1) {
+    throwSystemError(folly::format("open(\"{}\", {:#o}, 0{:#o}) failed",
+                                   name, flags, mode).fbstr());
+  }
+  ownsFd_ = true;
+}
+
+File::File(const std::string& name, int flags, mode_t mode)
+  : File(name.c_str(), flags, mode) {}
+
+File::File(StringPiece name, int flags, mode_t mode)
+  : File(name.str(), flags, mode) {}
+
+File::File(File&& other) noexcept
+  : fd_(other.fd_)
+  , ownsFd_(other.ownsFd_) {
+  other.release();
+}
+
+File& File::operator=(File&& other) {
+  closeNoThrow();
+  swap(other);
+  return *this;
+}
+
+File::~File() {
+  auto fd = fd_;
+  if (!closeNoThrow()) {  // ignore most errors
+    DCHECK_NE(errno, EBADF) << "closing fd " << fd << ", it may already "
+      << "have been closed. Another time, this might close the wrong FD.";
+  }
+}
+
+/* static */ File File::temporary() {
+  // make a temp file with tmpfile(), dup the fd, then return it in a File.
+  FILE* tmpFile = tmpfile();
+  checkFopenError(tmpFile, "tmpfile() failed");
+  SCOPE_EXIT { fclose(tmpFile); };
+
+  int fd = ::dup(fileno(tmpFile));
+  checkUnixError(fd, "dup() failed");
+
+  return File(fd, true);
+}
+
+int File::release() noexcept {
+  int released = fd_;
+  fd_ = -1;
+  ownsFd_ = false;
+  return released;
+}
+
+void File::swap(File& other) {
+  using std::swap;
+  swap(fd_, other.fd_);
+  swap(ownsFd_, other.ownsFd_);
+}
+
+void swap(File& a, File& b) {
+  a.swap(b);
+}
+
+File File::dup() const {
+  if (fd_ != -1) {
+    int fd = ::dup(fd_);
+    checkUnixError(fd, "dup() failed");
+
+    return File(fd, true);
+  }
+
+  return File();
+}
+
+void File::close() {
+  if (!closeNoThrow()) {
+    throwSystemError("close() failed");
+  }
+}
+
+bool File::closeNoThrow() {
+  int r = ownsFd_ ? ::close(fd_) : 0;
+  release();
+  return r == 0;
+}
+
+void File::lock() { doLock(LOCK_EX); }
+bool File::try_lock() { return doTryLock(LOCK_EX); }
+void File::lock_shared() { doLock(LOCK_SH); }
+bool File::try_lock_shared() { return doTryLock(LOCK_SH); }
+
+void File::doLock(int op) {
+  checkUnixError(flockNoInt(fd_, op), "flock() failed (lock)");
+}
+
+bool File::doTryLock(int op) {
+  int r = flockNoInt(fd_, op | LOCK_NB);
+  // flock returns EWOULDBLOCK if already locked
+  if (r == -1 && errno == EWOULDBLOCK) return false;
+  checkUnixError(r, "flock() failed (try_lock)");
+  return true;
+}
+
+void File::unlock() {
+  checkUnixError(flockNoInt(fd_, LOCK_UN), "flock() failed (unlock)");
+}
+void File::unlock_shared() { unlock(); }
+
+}  // namespace folly
diff --git a/faux-folly/folly/File.h b/faux-folly/folly/File.h
new file mode 100644
index 0000000..4592514
--- /dev/null
+++ b/faux-folly/folly/File.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_FILE_H_
+#define FOLLY_FILE_H_
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+
+#include <folly/Portability.h>
+#include <folly/Range.h>
+
+namespace folly {
+
+/**
+ * A File represents an open file.
+ */
+class File {
+ public:
+  /**
+   * Creates an empty File object, for late initialization.
+   */
+  File();
+
+  /**
+   * Create a File object from an existing file descriptor.
+   * Takes ownership of the file descriptor if ownsFd is true.
+   */
+  explicit File(int fd, bool ownsFd = false);
+
+  /**
+   * Open and create a file object.  Throws on error.
+   */
+  explicit File(const char* name, int flags = O_RDONLY, mode_t mode = 0666);
+  explicit File(
+      const std::string& name, int flags = O_RDONLY, mode_t mode = 0666);
+  explicit File(StringPiece name, int flags = O_RDONLY, mode_t mode = 0666);
+
+  ~File();
+
+  /**
+   * Create and return a temporary, owned file (uses tmpfile()).
+   */
+  static File temporary();
+
+  /**
+   * Return the file descriptor, or -1 if the file was closed.
+   */
+  int fd() const { return fd_; }
+
+  /**
+   * Returns 'true' iff the file was successfully opened.
+   */
+  explicit operator bool() const {
+    return fd_ != -1;
+  }
+
+  /**
+   * Duplicate file descriptor and return File that owns it.
+   */
+  File dup() const;
+
+  /**
+   * If we own the file descriptor, close the file and throw on error.
+   * Otherwise, do nothing.
+   */
+  void close();
+
+  /**
+   * Closes the file (if owned).  Returns true on success, false (and sets
+   * errno) on error.
+   */
+  bool closeNoThrow();
+
+  /**
+   * Returns and releases the file descriptor; no longer owned by this File.
+   * Returns -1 if the File object didn't wrap a file.
+   */
+  int release() noexcept;
+
+  /**
+   * Swap this File with another.
+   */
+  void swap(File& other);
+
+  // movable
+  File(File&&) noexcept;
+  File& operator=(File&&);
+
+  // FLOCK (INTERPROCESS) LOCKS
+  //
+  // NOTE THAT THESE LOCKS ARE flock() LOCKS.  That is, they may only be used
+  // for inter-process synchronization -- an attempt to acquire a second lock
+  // on the same file descriptor from the same process may succeed.  Attempting
+  // to acquire a second lock on a different file descriptor for the same file
+  // should fail, but some systems might implement flock() using fcntl() locks,
+  // in which case it will succeed.
+  void lock();
+  bool try_lock();
+  void unlock();
+
+  void lock_shared();
+  bool try_lock_shared();
+  void unlock_shared();
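+
+  // For example, File satisfies the BasicLockable requirements, so it can be
+  //  used with std::lock_guard for inter-process critical sections
+  //  (hypothetical path):
+  //
+  //    folly::File lockFile("/tmp/my.lock", O_RDWR | O_CREAT);
+  //    std::lock_guard<folly::File> guard(lockFile);  // flock(LOCK_EX)
+  //    // ... exclusive across processes until guard is destroyed ...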
+
+ private:
+  void doLock(int op);
+  bool doTryLock(int op);
+
+  // unique
+  File(const File&) = delete;
+  File& operator=(const File&) = delete;
+
+  int fd_;
+  bool ownsFd_;
+};
+
+void swap(File& a, File& b);
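+
+// A brief usage sketch (hypothetical path; readFull is declared in
+//  folly/FileUtil.h):
+//
+//   folly::File f("/etc/hosts");  // opens O_RDONLY by default, throws on error
+//   char buf[128];
+//   ssize_t n = folly::readFull(f.fd(), buf, sizeof(buf));
+//   // ... use buf[0..n) ...
+//   // f owns the fd (File opened it), so its destructor closes it.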
+
+
+}  // namespace folly
+
+#endif /* FOLLY_FILE_H_ */
diff --git a/faux-folly/folly/FileUtil.cpp b/faux-folly/folly/FileUtil.cpp
new file mode 100644
index 0000000..728f1d2
--- /dev/null
+++ b/faux-folly/folly/FileUtil.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/FileUtil.h>
+
+#include <cerrno>
+#ifdef __APPLE__
+#include <fcntl.h>
+#endif
+#include <sys/file.h>
+#include <sys/socket.h>
+
+#include <folly/detail/FileUtilDetail.h>
+
+namespace folly {
+
+using namespace fileutil_detail;
+
+int openNoInt(const char* name, int flags, mode_t mode) {
+  return wrapNoInt(open, name, flags, mode);
+}
+
+int closeNoInt(int fd) {
+  int r = close(fd);
+  // Ignore EINTR.  On Linux, close() may only return EINTR after the file
+  // descriptor has been closed, so you must not retry close() on EINTR --
+  // in the best case, you'll get EBADF, and in the worst case, you'll end up
+  // closing a different file (one opened from another thread).
+  //
+  // Interestingly enough, the Single Unix Specification says that the state
+  // of the file descriptor is unspecified if close returns EINTR.  In that
+  // case, the safe thing to do is also not to retry close() -- leaking a file
+  // descriptor is definitely better than closing the wrong file.
+  if (r == -1 && errno == EINTR) {
+    r = 0;
+  }
+  return r;
+}
+
+int fsyncNoInt(int fd) {
+  return wrapNoInt(fsync, fd);
+}
+
+int dupNoInt(int fd) {
+  return wrapNoInt(dup, fd);
+}
+
+int dup2NoInt(int oldfd, int newfd) {
+  return wrapNoInt(dup2, oldfd, newfd);
+}
+
+int fdatasyncNoInt(int fd) {
+#if defined(__APPLE__)
+  return wrapNoInt(fcntl, fd, F_FULLFSYNC);
+#elif defined(__FreeBSD__) || defined(_MSC_VER)
+  return wrapNoInt(fsync, fd);
+#else
+  return wrapNoInt(fdatasync, fd);
+#endif
+}
+
+int ftruncateNoInt(int fd, off_t len) {
+  return wrapNoInt(ftruncate, fd, len);
+}
+
+int truncateNoInt(const char* path, off_t len) {
+  return wrapNoInt(truncate, path, len);
+}
+
+int flockNoInt(int fd, int operation) {
+  return wrapNoInt(flock, fd, operation);
+}
+
+int shutdownNoInt(int fd, int how) {
+  return wrapNoInt(shutdown, fd, how);
+}
+
+ssize_t readNoInt(int fd, void* buf, size_t count) {
+  return wrapNoInt(read, fd, buf, count);
+}
+
+ssize_t preadNoInt(int fd, void* buf, size_t count, off_t offset) {
+  return wrapNoInt(pread, fd, buf, count, offset);
+}
+
+ssize_t readvNoInt(int fd, const iovec* iov, int count) {
+  // Note: must wrap readv here, not writev.
+  return wrapNoInt(readv, fd, iov, count);
+}
+
+ssize_t writeNoInt(int fd, const void* buf, size_t count) {
+  return wrapNoInt(write, fd, buf, count);
+}
+
+ssize_t pwriteNoInt(int fd, const void* buf, size_t count, off_t offset) {
+  return wrapNoInt(pwrite, fd, buf, count, offset);
+}
+
+ssize_t writevNoInt(int fd, const iovec* iov, int count) {
+  return wrapNoInt(writev, fd, iov, count);
+}
+
+ssize_t readFull(int fd, void* buf, size_t count) {
+  return wrapFull(read, fd, buf, count);
+}
+
+ssize_t preadFull(int fd, void* buf, size_t count, off_t offset) {
+  return wrapFull(pread, fd, buf, count, offset);
+}
+
+ssize_t writeFull(int fd, const void* buf, size_t count) {
+  return wrapFull(write, fd, const_cast<void*>(buf), count);
+}
+
+ssize_t pwriteFull(int fd, const void* buf, size_t count, off_t offset) {
+  return wrapFull(pwrite, fd, const_cast<void*>(buf), count, offset);
+}
+
+ssize_t readvFull(int fd, iovec* iov, int count) {
+  return wrapvFull(readv, fd, iov, count);
+}
+
+#if FOLLY_HAVE_PREADV
+ssize_t preadvFull(int fd, iovec* iov, int count, off_t offset) {
+  return wrapvFull(preadv, fd, iov, count, offset);
+}
+#endif
+
+ssize_t writevFull(int fd, iovec* iov, int count) {
+  return wrapvFull(writev, fd, iov, count);
+}
+
+#if FOLLY_HAVE_PWRITEV
+ssize_t pwritevFull(int fd, iovec* iov, int count, off_t offset) {
+  return wrapvFull(pwritev, fd, iov, count, offset);
+}
+#endif
+
+}  // namespace folly
diff --git a/faux-folly/folly/FileUtil.h b/faux-folly/folly/FileUtil.h
new file mode 100644
index 0000000..5adcd62
--- /dev/null
+++ b/faux-folly/folly/FileUtil.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_FILEUTIL_H_
+#define FOLLY_FILEUTIL_H_
+
+#include <folly/Conv.h>
+#include <folly/Portability.h>
+#include <folly/ScopeGuard.h>
+
+#include <cassert>
+#include <limits>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+namespace folly {
+
+/**
+ * Convenience wrappers around some commonly used system calls.  The *NoInt
+ * wrappers retry on EINTR.  The *Full wrappers retry on EINTR and also loop
+ * until all data is transferred.  Note that the *Full wrappers weaken the
+ * thread semantics of the underlying system calls.
+ */
+int openNoInt(const char* name, int flags, mode_t mode = 0666);
+int closeNoInt(int fd);
+int dupNoInt(int fd);
+int dup2NoInt(int oldfd, int newfd);
+int fsyncNoInt(int fd);
+int fdatasyncNoInt(int fd);
+int ftruncateNoInt(int fd, off_t len);
+int truncateNoInt(const char* path, off_t len);
+int flockNoInt(int fd, int operation);
+int shutdownNoInt(int fd, int how);
+
+ssize_t readNoInt(int fd, void* buf, size_t n);
+ssize_t preadNoInt(int fd, void* buf, size_t n, off_t offset);
+ssize_t readvNoInt(int fd, const iovec* iov, int count);
+
+ssize_t writeNoInt(int fd, const void* buf, size_t n);
+ssize_t pwriteNoInt(int fd, const void* buf, size_t n, off_t offset);
+ssize_t writevNoInt(int fd, const iovec* iov, int count);
+
+/**
+ * Wrapper around read() (and pread()) that, in addition to retrying on
+ * EINTR, will loop until all data is read.
+ *
+ * This wrapper is only useful for blocking file descriptors (for non-blocking
+ * file descriptors, you have to be prepared to deal with incomplete reads
+ * anyway), and only exists because POSIX allows read() to return an incomplete
+ * read if interrupted by a signal (instead of returning -1 and setting errno
+ * to EINTR).
+ *
+ * Note that this wrapper weakens the thread safety of read(): the file pointer
+ * is shared between threads, but the system call is atomic.  If multiple
+ * threads are reading from a file at the same time, you don't know where your
+ * data came from in the file, but you do know that the returned bytes were
+ * contiguous.  You can no longer make this assumption if using readFull().
+ * You should probably use pread() when reading from the same file descriptor
+ * from multiple threads simultaneously, anyway.
+ *
+ * Note that readvFull and preadvFull require iov to be non-const, unlike
+ * readv and preadv.  The contents of iov after these functions return are
+ * unspecified.
+ */
+ssize_t readFull(int fd, void* buf, size_t n);
+ssize_t preadFull(int fd, void* buf, size_t n, off_t offset);
+ssize_t readvFull(int fd, iovec* iov, int count);
+#if FOLLY_HAVE_PREADV
+ssize_t preadvFull(int fd, iovec* iov, int count, off_t offset);
+#endif
+
+/**
+ * Similar to readFull and preadFull above, wrappers around write() and
+ * pwrite() that loop until all data is written.
+ *
+ * Generally, the write() / pwrite() system call may always write fewer bytes
+ * than requested, just like read().  In certain cases (such as when writing to
+ * a pipe), POSIX provides stronger guarantees, but not in the general case.
+ * For example, Linux (even on a 64-bit platform) won't write more than 2GB in
+ * one write() system call.
+ *
+ * Note that writevFull and pwritevFull require iov to be non-const, unlike
+ * writev and pwritev.  The contents of iov after these functions return are
+ * unspecified.
+ */
+ssize_t writeFull(int fd, const void* buf, size_t n);
+ssize_t pwriteFull(int fd, const void* buf, size_t n, off_t offset);
+ssize_t writevFull(int fd, iovec* iov, int count);
+#if FOLLY_HAVE_PWRITEV
+ssize_t pwritevFull(int fd, iovec* iov, int count, off_t offset);
+#endif
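+
+// E.g., a sketch of writing an entire buffer to an already-open fd:
+//
+//   std::string s = "hello\n";
+//   if (folly::writeFull(fd, s.data(), s.size()) !=
+//       static_cast<ssize_t>(s.size())) {
+//     // errno describes the failure; a prefix of s may have been written
+//   }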
+
+/**
+ * Read entire file (if num_bytes is defaulted) or no more than
+ * num_bytes (otherwise) into container *out. The container is assumed
+ * to be contiguous, with element size equal to 1, and offer size(),
+ * reserve(), and random access (e.g. std::vector<char>, std::string,
+ * fbstring).
+ *
+ * Returns: true on success or false on failure. In the latter case
+ * errno will be set appropriately by the failing system primitive.
+ */
+template <class Container>
+bool readFile(const char* file_name, Container& out,
+              size_t num_bytes = std::numeric_limits<size_t>::max()) {
+  static_assert(sizeof(out[0]) == 1,
+                "readFile: only containers with byte-sized elements accepted");
+  assert(file_name);
+
+  const auto fd = openNoInt(file_name, O_RDONLY);
+  if (fd == -1) return false;
+
+  size_t soFar = 0; // amount of bytes successfully read
+  SCOPE_EXIT {
+    assert(out.size() >= soFar); // resize better doesn't throw
+    out.resize(soFar);
+    // Ignore errors when closing the file
+    closeNoInt(fd);
+  };
+
+  // Obtain file size:
+  struct stat buf;
+  if (fstat(fd, &buf) == -1) return false;
+  // Some files (notably under /proc and /sys on Linux) lie about
+  // their size, so treat the size advertised by fstat as a hint,
+  // but don't rely on it. In particular, if the size is zero, we
+  // should still attempt to read. If it is non-zero, we'll attempt to
+  // read one extra byte, so that a single read can detect end-of-file.
+  constexpr size_t initialAlloc = 1024 * 4;
+  out.resize(
+    std::min(
+      buf.st_size > 0 ? folly::to<size_t>(buf.st_size + 1) : initialAlloc,
+      num_bytes));
+
+  while (soFar < out.size()) {
+    const auto actual = readFull(fd, &out[soFar], out.size() - soFar);
+    if (actual == -1) {
+      return false;
+    }
+    soFar += actual;
+    if (soFar < out.size()) {
+      // File exhausted
+      break;
+    }
+    // Ew, allocate more memory. Use exponential growth to avoid
+    // quadratic behavior. Cap size to num_bytes.
+    out.resize(std::min(out.size() * 3 / 2, num_bytes));
+  }
+
+  return true;
+}
+
+/**
+ * Writes container to file. The container is assumed to be
+ * contiguous, with element size equal to 1, and offering STL-like
+ * methods empty(), size(), and indexed access
+ * (e.g. std::vector<char>, std::string, fbstring, StringPiece).
+ *
+ * "flags" dictates the open flags to use. Default is to create file
+ * if it doesn't exist and truncate it.
+ *
+ * Returns: true on success or false on failure. In the latter case
+ * errno will be set appropriately by the failing system primitive.
+ */
+template <class Container>
+bool writeFile(const Container& data, const char* filename,
+              int flags = O_WRONLY | O_CREAT | O_TRUNC) {
+  static_assert(sizeof(data[0]) == 1,
+                "writeFile works with element size equal to 1");
+  int fd = open(filename, flags, 0666);
+  if (fd == -1) {
+    return false;
+  }
+  bool ok = data.empty() ||
+    writeFull(fd, &data[0], data.size()) == static_cast<ssize_t>(data.size());
+  return closeNoInt(fd) == 0 && ok;
+}
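+
+// A short usage sketch (hypothetical file name; both functions report
+//  failure by returning false and leaving errno set):
+//
+//   std::string contents;
+//   if (folly::readFile("/tmp/example.txt", contents)) {
+//     contents += "\n";
+//     folly::writeFile(contents, "/tmp/example.txt");
+//   }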
+
+}  // namespace folly
+
+#endif /* FOLLY_FILEUTIL_H_ */
diff --git a/faux-folly/folly/Foreach.h b/faux-folly/folly/Foreach.h
new file mode 100644
index 0000000..9d67c14
--- /dev/null
+++ b/faux-folly/folly/Foreach.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_BASE_FOREACH_H_
+#define FOLLY_BASE_FOREACH_H_
+
+/*
+ * Interim macros (until we have C++11 range-based for) that simplify
+ * writing loops of the form
+ *
+ * for (Container<data>::iterator i = c.begin(); i != c.end(); ++i) statement
+ *
+ * Just replace the above with:
+ *
+ * FOR_EACH (i, c) statement
+ *
+ * and everything is taken care of.
+ *
+ * The implementation is a bit convoluted to make sure the container is
+ * only evaluated once (however, keep in mind that c.end() is evaluated
+ * at every pass through the loop). To ensure the container is not
+ * evaluated multiple times, the macro defines one do-nothing if
+ * statement to inject the Boolean variable FOR_EACH_state1, and then a
+ * for statement that is executed only once, which defines the variable
+ * FOR_EACH_state2 holding an rvalue reference to the container being
+ * iterated. The workhorse is the last loop, which uses the just defined
+ * rvalue reference FOR_EACH_state2.
+ *
+ * The state variables are nested so they don't interfere; you can use
+ * FOR_EACH multiple times in the same scope, either at the same level or
+ * nested.
+ *
+ * In optimized builds g++ eliminates the extra gymnastics entirely and
+ * generates code 100% identical to the handwritten loop.
+ */
+
+#include <type_traits>
+
+/*
+ * Shorthand for:
+ *   for (auto i = c.begin(); i != c.end(); ++i)
+ * except that c is only evaluated once.
+ */
+#define FOR_EACH(i, c)                              \
+  if (bool FOR_EACH_state1 = false) {} else         \
+    for (auto && FOR_EACH_state2 = (c);             \
+         !FOR_EACH_state1; FOR_EACH_state1 = true)  \
+      for (auto i = FOR_EACH_state2.begin();        \
+           i != FOR_EACH_state2.end(); ++i)
+
+/*
+ * Similar to FOR_EACH, but iterates the container backwards by
+ * using rbegin() and rend().
+ */
+#define FOR_EACH_R(i, c)                                \
+  if (bool FOR_EACH_R_state1 = false) {} else           \
+    for (auto && FOR_EACH_R_state2 = (c);               \
+         !FOR_EACH_R_state1; FOR_EACH_R_state1 = true)  \
+      for (auto i = FOR_EACH_R_state2.rbegin();         \
+           i != FOR_EACH_R_state2.rend(); ++i)
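+
+/*
+ * For example (i is an iterator, so dereference it):
+ *   std::vector<int> v = {1, 2, 3};
+ *   FOR_EACH (i, v)   { sum += *i; }           // visits 1, 2, 3
+ *   FOR_EACH_R (i, v) { out.push_back(*i); }   // visits 3, 2, 1
+ */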
+
+/*
+ * Similar to FOR_EACH but also allows client to specify a 'count' variable
+ * to track the current iteration in the loop (starting at zero).
+ * Similar to python's enumerate() function.  For example:
+ * string commaSeparatedValues = "VALUES: ";
+ * FOR_EACH_ENUMERATE(ii, value, columns) {   // don't want comma at the end!
+ *   commaSeparatedValues += (ii == 0) ? *value : string(",") + *value;
+ * }
+ */
+#define FOR_EACH_ENUMERATE(count, i, c)                                \
+  if (bool FOR_EACH_state1 = false) {} else                            \
+    for (auto && FOR_EACH_state2 = (c);                                \
+         !FOR_EACH_state1; FOR_EACH_state1 = true)                     \
+      if (size_t FOR_EACH_privateCount = 0) {} else                    \
+        if (const size_t& count = FOR_EACH_privateCount) {} else       \
+          for (auto i = FOR_EACH_state2.begin();                       \
+               i != FOR_EACH_state2.end(); ++FOR_EACH_privateCount, ++i)
+
+/**
+ * Similar to FOR_EACH, but gives the user the key and value for each entry in
+ * the container, instead of just the iterator to the entry. For example:
+ *   map<string, string> testMap;
+ *   FOR_EACH_KV(key, value, testMap) {
+ *      cout << key << " " << value;
+ *   }
+ */
+#define FOR_EACH_KV(k, v, c)                                    \
+  if (unsigned int FOR_EACH_state1 = 0) {} else                 \
+    for (auto && FOR_EACH_state2 = (c);                         \
+         !FOR_EACH_state1; FOR_EACH_state1 = 1)                 \
+      for (auto FOR_EACH_state3 = FOR_EACH_state2.begin();      \
+           FOR_EACH_state3 != FOR_EACH_state2.end();            \
+           FOR_EACH_state1 == 2                                 \
+             ? ((FOR_EACH_state1 = 0), ++FOR_EACH_state3)       \
+             : (FOR_EACH_state3 = FOR_EACH_state2.end()))       \
+        for (auto &k = FOR_EACH_state3->first;                  \
+             !FOR_EACH_state1; ++FOR_EACH_state1)               \
+          for (auto &v = FOR_EACH_state3->second;               \
+               !FOR_EACH_state1; ++FOR_EACH_state1)
+
+namespace folly { namespace detail {
+
+// Boost 1.48 lacks has_less, we emulate a subset of it here.
+template <typename T, typename U>
+class HasLess {
+  struct BiggerThanChar { char unused[2]; };
+  template <typename C, typename D> static char test(decltype(C() < D())*);
+  template <typename, typename> static BiggerThanChar test(...);
+public:
+  enum { value = sizeof(test<T, U>(0)) == 1 };
+};
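+
+// For example, HasLess<int, int>::value is 1, while HasLess<T, U>::value is 0
+//  when no operator< accepts (T, U). Note the probe expression is
+//  decltype(C() < D()), so both types must also be default-constructible for
+//  detection to succeed.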
+
+/**
+ * notThereYet helps the FOR_EACH_RANGE macro by opportunistically
+ * using "<" instead of "!=" whenever available when checking for loop
+ * termination. This makes examples such as FOR_EACH_RANGE (i, 10, 5)
+ * execute zero iterations instead of looping virtually
+ * forever. At the same time, some iterator types define "!=" but not
+ * "<". The notThereYet function will dispatch differently for those.
+ *
+ * Below is the correct implementation of notThereYet. It is disabled
+ * because of a bug in Boost 1.46: The filesystem::path::iterator
+ * defines operator< (via boost::iterator_facade), but that in turn
+ * uses distance_to which is undefined for that particular
+ * iterator. So HasLess (defined above) identifies
+ * boost::filesystem::path as properly comparable with <, but in fact
+ * attempting to do so will yield a compile-time error.
+ *
+ * The else branch (active) contains a conservative
+ * implementation.
+ */
+
+#if 0
+
+template <class T, class U>
+typename std::enable_if<HasLess<T, U>::value, bool>::type
+notThereYet(T& iter, const U& end) {
+  return iter < end;
+}
+
+template <class T, class U>
+typename std::enable_if<!HasLess<T, U>::value, bool>::type
+notThereYet(T& iter, const U& end) {
+  return iter != end;
+}
+
+#else
+
+template <class T, class U>
+typename std::enable_if<
+  (std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
+  (std::is_pointer<T>::value && std::is_pointer<U>::value),
+  bool>::type
+notThereYet(T& iter, const U& end) {
+  return iter < end;
+}
+
+template <class T, class U>
+typename std::enable_if<
+  !(
+    (std::is_arithmetic<T>::value && std::is_arithmetic<U>::value) ||
+    (std::is_pointer<T>::value && std::is_pointer<U>::value)
+  ),
+  bool>::type
+notThereYet(T& iter, const U& end) {
+  return iter != end;
+}
+
+#endif
+
+
+/**
+ * downTo is similar to notThereYet, but in reverse - it helps the
+ * FOR_EACH_RANGE_R macro.
+ */
+template <class T, class U>
+typename std::enable_if<HasLess<U, T>::value, bool>::type
+downTo(T& iter, const U& begin) {
+  return begin < iter--;
+}
+
+template <class T, class U>
+typename std::enable_if<!HasLess<U, T>::value, bool>::type
+downTo(T& iter, const U& begin) {
+  if (iter == begin) return false;
+  --iter;
+  return true;
+}
+
+} }
+
+/*
+ * Iteration with given limits. end is assumed to be reachable from
+ * begin. end is evaluated every pass through the loop.
+ *
+ * NOTE: The type of the loop variable should be the common type of "begin"
+ *       and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
+ *       to be "long". This is done by getting the type of (true ? begin : end)
+ */
+#define FOR_EACH_RANGE(i, begin, end)           \
+  for (auto i = (true ? (begin) : (end));       \
+       ::folly::detail::notThereYet(i, (end));  \
+       ++i)
+
+/*
+ * Iteration with given limits. begin is assumed to be reachable from
+ * end by successive decrements. begin is evaluated every pass through
+ * the loop.
+ *
+ * NOTE: The type of the loop variable should be the common type of "begin"
+ *       and "end". e.g. If "begin" is "int" but "end" is "long", we want "i"
+ *       to be "long". This is done by getting the type of (false ? begin : end)
+ */
+#define FOR_EACH_RANGE_R(i, begin, end) \
+  for (auto i = (false ? (begin) : (end)); ::folly::detail::downTo(i, (begin));)
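+
+/*
+ * For example:
+ *   FOR_EACH_RANGE (i, 0, 5)   { ... }  // i = 0, 1, 2, 3, 4
+ *   FOR_EACH_RANGE (i, 10, 5)  { ... }  // zero iterations: notThereYet
+ *                                       //  uses "<" for arithmetic types
+ *   FOR_EACH_RANGE_R (i, 0, 5) { ... }  // i = 4, 3, 2, 1, 0
+ */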
+
+#endif
diff --git a/faux-folly/folly/Format-inl.h b/faux-folly/folly/Format-inl.h
new file mode 100644
index 0000000..64591d6
--- /dev/null
+++ b/faux-folly/folly/Format-inl.h
@@ -0,0 +1,1102 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_FORMAT_H_
+#error This file may only be included from Format.h.
+#endif
+
+#include <array>
+#include <deque>
+#include <map>
+#include <unordered_map>
+#include <vector>
+
+#include <folly/Exception.h>
+#include <folly/FormatTraits.h>
+#include <folly/Traits.h>
+
+// Ignore -Wformat-nonliteral warnings within this file
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+
+namespace folly {
+
+namespace detail {
+
+// Updates the end of the buffer after the comma separators have been added.
+void insertThousandsGroupingUnsafe(char* start_buffer, char** end_buffer);
+
+extern const char formatHexUpper[256][2];
+extern const char formatHexLower[256][2];
+extern const char formatOctal[512][3];
+extern const char formatBinary[256][8];
+
+const size_t kMaxHexLength = 2 * sizeof(uintmax_t);
+const size_t kMaxOctalLength = 3 * sizeof(uintmax_t);
+const size_t kMaxBinaryLength = 8 * sizeof(uintmax_t);
+
+/**
+ * Convert an unsigned to hex, using repr (which maps from each possible
+ * 2-hex-bytes value to the 2-character representation).
+ *
+ * Just like folly::detail::uintToBuffer in Conv.h, writes at the *end* of
+ * the supplied buffer and returns the offset of the beginning of the string
+ * from the start of the buffer.  The formatted string will be in range
+ * [buf+begin, buf+bufLen).
+ */
+template <class Uint>
+size_t uintToHex(char* buffer, size_t bufLen, Uint v,
+                 const char (&repr)[256][2]) {
+  // 'v >>= 7, v >>= 1' is just a workaround for the shift-size warning
+  // when Uint = uint8_t (a false positive, as v >= 256 implies sizeof(v) > 1).
+  for (; !less_than<unsigned, 256>(v); v >>= 7, v >>= 1) {
+    auto b = v & 0xff;
+    bufLen -= 2;
+    buffer[bufLen] = repr[b][0];
+    buffer[bufLen + 1] = repr[b][1];
+  }
+  buffer[--bufLen] = repr[v][1];
+  if (v >= 16) {
+    buffer[--bufLen] = repr[v][0];
+  }
+  return bufLen;
+}
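+
+// For example, with v = 0xBEEF, bufLen = 16 and formatHexLower, the function
+//  writes "beef" into buffer[12..16) and returns 12.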
+
+/**
+ * Convert an unsigned to hex, using lower-case letters for the digits
+ * above 9.  See the comments for uintToHex.
+ */
+template <class Uint>
+inline size_t uintToHexLower(char* buffer, size_t bufLen, Uint v) {
+  return uintToHex(buffer, bufLen, v, formatHexLower);
+}
+
+/**
+ * Convert an unsigned to hex, using upper-case letters for the digits
+ * above 9.  See the comments for uintToHex.
+ */
+template <class Uint>
+inline size_t uintToHexUpper(char* buffer, size_t bufLen, Uint v) {
+  return uintToHex(buffer, bufLen, v, formatHexUpper);
+}
+
+/**
+ * Convert an unsigned to octal.
+ *
+ * Just like folly::detail::uintToBuffer in Conv.h, writes at the *end* of
+ * the supplied buffer and returns the offset of the beginning of the string
+ * from the start of the buffer.  The formatted string will be in range
+ * [buf+begin, buf+bufLen).
+ */
+template <class Uint>
+size_t uintToOctal(char* buffer, size_t bufLen, Uint v) {
+  auto& repr = formatOctal;
+  // 'v >>= 7, v >>= 2' is just a workaround for the shift-size warning
+  // when Uint = uint8_t (a false positive, as v >= 512 implies sizeof(v) > 1).
+  for (; !less_than<unsigned, 512>(v); v >>= 7, v >>= 2) {
+    auto b = v & 0x1ff;
+    bufLen -= 3;
+    buffer[bufLen] = repr[b][0];
+    buffer[bufLen + 1] = repr[b][1];
+    buffer[bufLen + 2] = repr[b][2];
+  }
+  buffer[--bufLen] = repr[v][2];
+  if (v >= 8) {
+    buffer[--bufLen] = repr[v][1];
+  }
+  if (v >= 64) {
+    buffer[--bufLen] = repr[v][0];
+  }
+  return bufLen;
+}
+
+/**
+ * Convert an unsigned to binary.
+ *
+ * Just like folly::detail::uintToBuffer in Conv.h, writes at the *end* of
+ * the supplied buffer and returns the offset of the beginning of the string
+ * from the start of the buffer.  The formatted string will be in range
+ * [buf+begin, buf+bufLen).
+ */
+template <class Uint>
+size_t uintToBinary(char* buffer, size_t bufLen, Uint v) {
+  auto& repr = formatBinary;
+  if (v == 0) {
+    buffer[--bufLen] = '0';
+    return bufLen;
+  }
+  for (; v; v >>= 7, v >>= 1) {
+    auto b = v & 0xff;
+    bufLen -= 8;
+    memcpy(buffer + bufLen, &(repr[b][0]), 8);
+  }
+  while (buffer[bufLen] == '0') {
+    ++bufLen;
+  }
+  return bufLen;
+}
+
+}  // namespace detail
+
+template <class Derived, bool containerMode, class... Args>
+BaseFormatter<Derived, containerMode, Args...>::BaseFormatter(StringPiece str,
+                                                              Args&&... args)
+    : str_(str),
+      values_(FormatValue<typename std::decay<Args>::type>(
+          std::forward<Args>(args))...) {
+  static_assert(!containerMode || sizeof...(Args) == 1,
+                "Exactly one argument required in container mode");
+}
+
+template <class Derived, bool containerMode, class... Args>
+template <class Output>
+void BaseFormatter<Derived, containerMode, Args...>::operator()(Output& out)
+    const {
+  // Copy raw string (without format specifiers) to output;
+  // not as simple as we'd like, as we still need to translate "}}" to "}"
+  // and throw if we see any lone "}"
+  auto outputString = [&out] (StringPiece s) {
+    auto p = s.begin();
+    auto end = s.end();
+    while (p != end) {
+      auto q = static_cast<const char*>(memchr(p, '}', end - p));
+      if (!q) {
+        out(StringPiece(p, end));
+        break;
+      }
+      ++q;
+      out(StringPiece(p, q));
+      p = q;
+
+      if (p == end || *p != '}') {
+        throw BadFormatArg("folly::format: single '}' in format string");
+      }
+      ++p;
+    }
+  };
+
+  auto p = str_.begin();
+  auto end = str_.end();
+
+  int nextArg = 0;
+  bool hasDefaultArgIndex = false;
+  bool hasExplicitArgIndex = false;
+  while (p != end) {
+    auto q = static_cast<const char*>(memchr(p, '{', end - p));
+    if (!q) {
+      outputString(StringPiece(p, end));
+      break;
+    }
+    outputString(StringPiece(p, q));
+    p = q + 1;
+
+    if (p == end) {
+      throw BadFormatArg("folly::format: '}' at end of format string");
+    }
+
+    // "{{" -> "{"
+    if (*p == '{') {
+      out(StringPiece(p, 1));
+      ++p;
+      continue;
+    }
+
+    // Format string
+    q = static_cast<const char*>(memchr(p, '}', end - p));
+    if (q == nullptr) {
+      throw BadFormatArg("folly::format: missing ending '}'");
+    }
+    FormatArg arg(StringPiece(p, q));
+    p = q + 1;
+
+    int argIndex = 0;
+    auto piece = arg.splitKey<true>();  // empty key component is okay
+    if (containerMode) {  // static
+      arg.enforce(arg.width != FormatArg::kDynamicWidth,
+                  "dynamic field width not supported in vformat()");
+      if (piece.empty()) {
+        arg.setNextIntKey(nextArg++);
+        hasDefaultArgIndex = true;
+      } else {
+        arg.setNextKey(piece);
+        hasExplicitArgIndex = true;
+      }
+    } else {
+      if (piece.empty()) {
+        if (arg.width == FormatArg::kDynamicWidth) {
+          arg.enforce(arg.widthIndex == FormatArg::kNoIndex,
+                      "cannot provide width arg index without value arg index");
+          int sizeArg = nextArg++;
+          arg.width = getSizeArg(sizeArg, arg);
+        }
+
+        argIndex = nextArg++;
+        hasDefaultArgIndex = true;
+      } else {
+        if (arg.width == FormatArg::kDynamicWidth) {
+          arg.enforce(arg.widthIndex != FormatArg::kNoIndex,
+                      "cannot provide value arg index without width arg index");
+          arg.width = getSizeArg(arg.widthIndex, arg);
+        }
+
+        try {
+          argIndex = to<int>(piece);
+        } catch (const std::out_of_range& e) {
+          arg.error("argument index must be integer");
+        }
+        arg.enforce(argIndex >= 0, "argument index must be non-negative");
+        hasExplicitArgIndex = true;
+      }
+    }
+
+    if (hasDefaultArgIndex && hasExplicitArgIndex) {
+      throw BadFormatArg(
+          "folly::format: may not have both default and explicit arg indexes");
+    }
+
+    doFormat(argIndex, arg, out);
+  }
+}
+
+template <class Derived, bool containerMode, class... Args>
+void writeTo(FILE* fp,
+             const BaseFormatter<Derived, containerMode, Args...>& formatter) {
+  auto writer = [fp] (StringPiece sp) {
+    size_t n = fwrite(sp.data(), 1, sp.size(), fp);
+    if (n < sp.size()) {
+      throwSystemError("Formatter writeTo", "fwrite failed");
+    }
+  };
+  formatter(writer);
+}
+
+namespace format_value {
+
+template <class FormatCallback>
+void formatString(StringPiece val, FormatArg& arg, FormatCallback& cb) {
+  if (arg.width != FormatArg::kDefaultWidth && arg.width < 0) {
+    throw BadFormatArg("folly::format: invalid width");
+  }
+  if (arg.precision != FormatArg::kDefaultPrecision && arg.precision < 0) {
+    throw BadFormatArg("folly::format: invalid precision");
+  }
+
+  // XXX: clang should be smart enough to not need the two static_cast<size_t>
+  // uses below given the above checks. If clang ever becomes that smart, we
+  // should remove the otherwise unnecessary warts.
+
+  if (arg.precision != FormatArg::kDefaultPrecision &&
+      val.size() > static_cast<size_t>(arg.precision)) {
+    val.reset(val.data(), arg.precision);
+  }
+
+  constexpr int padBufSize = 128;
+  char padBuf[padBufSize];
+
+  // Output padding, no more than padBufSize at once
+  auto pad = [&padBuf, &cb, padBufSize] (int chars) {
+    while (chars) {
+      int n = std::min(chars, padBufSize);
+      cb(StringPiece(padBuf, n));
+      chars -= n;
+    }
+  };
+
+  int padRemaining = 0;
+  if (arg.width != FormatArg::kDefaultWidth &&
+      val.size() < static_cast<size_t>(arg.width)) {
+    char fill = arg.fill == FormatArg::kDefaultFill ? ' ' : arg.fill;
+    int padChars = static_cast<int> (arg.width - val.size());
+    memset(padBuf, fill, std::min(padBufSize, padChars));
+
+    switch (arg.align) {
+    case FormatArg::Align::DEFAULT:
+    case FormatArg::Align::LEFT:
+      padRemaining = padChars;
+      break;
+    case FormatArg::Align::CENTER:
+      pad(padChars / 2);
+      padRemaining = padChars - padChars / 2;
+      break;
+    case FormatArg::Align::RIGHT:
+    case FormatArg::Align::PAD_AFTER_SIGN:
+      pad(padChars);
+      break;
+    default:
+      abort();
+      break;
+    }
+  }
+
+  cb(val);
+
+  if (padRemaining) {
+    pad(padRemaining);
+  }
+}
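+
+// For example, "{:*^7}" applied to "ab" yields "**ab***": padChars is 5, and
+//  CENTER emits 2 fill characters before the value and the remaining 3 after.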
+
+template <class FormatCallback>
+void formatNumber(StringPiece val, int prefixLen, FormatArg& arg,
+                  FormatCallback& cb) {
+  // precision means something different for numbers
+  arg.precision = FormatArg::kDefaultPrecision;
+  if (arg.align == FormatArg::Align::DEFAULT) {
+    arg.align = FormatArg::Align::RIGHT;
+  } else if (prefixLen && arg.align == FormatArg::Align::PAD_AFTER_SIGN) {
+    // Split off the prefix, then do any padding if necessary
+    cb(val.subpiece(0, prefixLen));
+    val.advance(prefixLen);
+    arg.width = std::max(arg.width - prefixLen, 0);
+  }
+  format_value::formatString(val, arg, cb);
+}
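+
+// For example, with PAD_AFTER_SIGN ('=' alignment), format("{:=+6}", 42)
+//  emits the '+' prefix first and pads between it and the digits: "+   42".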
+
+template <class FormatCallback,
+          class Derived,
+          bool containerMode,
+          class... Args>
+void formatFormatter(
+    const BaseFormatter<Derived, containerMode, Args...>& formatter,
+    FormatArg& arg,
+    FormatCallback& cb) {
+  if (arg.width == FormatArg::kDefaultWidth &&
+      arg.precision == FormatArg::kDefaultPrecision) {
+    // nothing to do
+    formatter(cb);
+  } else if (arg.align != FormatArg::Align::LEFT &&
+             arg.align != FormatArg::Align::DEFAULT) {
+    // We can only avoid creating a temporary string if we align left,
+    // as we'd need to know the size beforehand otherwise
+    format_value::formatString(formatter.fbstr(), arg, cb);
+  } else {
+    auto fn = [&arg, &cb] (StringPiece sp) mutable {
+      int sz = static_cast<int>(sp.size());
+      if (arg.precision != FormatArg::kDefaultPrecision) {
+        sz = std::min(arg.precision, sz);
+        sp.reset(sp.data(), sz);
+        arg.precision -= sz;
+      }
+      if (!sp.empty()) {
+        cb(sp);
+        if (arg.width != FormatArg::kDefaultWidth) {
+          arg.width = std::max(arg.width - sz, 0);
+        }
+      }
+    };
+    formatter(fn);
+    if (arg.width != FormatArg::kDefaultWidth && arg.width != 0) {
+      // Rely on formatString to do appropriate padding
+      format_value::formatString(StringPiece(), arg, cb);
+    }
+  }
+}
+
+}  // namespace format_value
+
+// Definitions for default FormatValue classes
+
+// Integral types (except bool)
+template <class T>
+class FormatValue<
+  T, typename std::enable_if<
+    std::is_integral<T>::value &&
+    !std::is_same<T, bool>::value>::type>
+  {
+ public:
+  explicit FormatValue(T val) : val_(val) { }
+
+  T getValue() const {
+    return val_;
+  }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    arg.validate(FormatArg::Type::INTEGER);
+    doFormat(arg, cb);
+  }
+
+  template <class FormatCallback>
+  void doFormat(FormatArg& arg, FormatCallback& cb) const {
+    char presentation = arg.presentation;
+    if (presentation == FormatArg::kDefaultPresentation) {
+      presentation = std::is_same<T, char>::value ? 'c' : 'd';
+    }
+
+    // Do all work as unsigned, we'll add the prefix ('0' or '0x' if necessary)
+    // and sign ourselves.
+    typedef typename std::make_unsigned<T>::type UT;
+    UT uval;
+    char sign;
+    if (std::is_signed<T>::value) {
+      if (folly::is_negative(val_)) {
+        uval = static_cast<UT>(-val_);
+        sign = '-';
+      } else {
+        uval = static_cast<UT>(val_);
+        switch (arg.sign) {
+        case FormatArg::Sign::PLUS_OR_MINUS:
+          sign = '+';
+          break;
+        case FormatArg::Sign::SPACE_OR_MINUS:
+          sign = ' ';
+          break;
+        default:
+          sign = '\0';
+          break;
+        }
+      }
+    } else {
+      uval = val_;
+      sign = '\0';
+
+      arg.enforce(arg.sign == FormatArg::Sign::DEFAULT,
+                  "sign specifications not allowed for unsigned values");
+    }
+
+    // max of:
+    // #x: 0x prefix + 16 bytes = 18 bytes
+    // #o: 0 prefix + 22 bytes = 23 bytes
+    // #b: 0b prefix + 64 bytes = 65 bytes
+    // ,d: 26 bytes (including thousands separators!)
+    // + nul terminator
+    // + 3 for sign and prefix shenanigans (see below)
+    constexpr size_t valBufSize = 69;
+    char valBuf[valBufSize];
+    char* valBufBegin = nullptr;
+    char* valBufEnd = nullptr;
+
+    int prefixLen = 0;
+    switch (presentation) {
+    case 'n': {
+      arg.enforce(!arg.basePrefix,
+                  "base prefix not allowed with '", presentation,
+                  "' specifier");
+
+      arg.enforce(!arg.thousandsSeparator,
+                  "cannot use ',' with the '", presentation,
+                  "' specifier");
+
+      valBufBegin = valBuf + 3;  // room for sign and base prefix
+#ifdef _MSC_VER
+      char valBuf2[valBufSize];
+      snprintf(valBuf2, valBufSize, "%ju", static_cast<uintmax_t>(uval));
+      int len = GetNumberFormat(
+        LOCALE_USER_DEFAULT,
+        0,
+        valBuf2,
+        nullptr,
+        valBufBegin,
+        (int)((valBuf + valBufSize) - valBufBegin)
+      );
+#else
+      int len = snprintf(valBufBegin, (valBuf + valBufSize) - valBufBegin,
+                         "%'ju", static_cast<uintmax_t>(uval));
+#endif
+      // valBufSize should always be big enough, so this should never
+      // happen.
+      assert(len < valBuf + valBufSize - valBufBegin);
+      valBufEnd = valBufBegin + len;
+      break;
+    }
+    case 'd':
+      arg.enforce(!arg.basePrefix,
+                  "base prefix not allowed with '", presentation,
+                  "' specifier");
+      valBufBegin = valBuf + 3;  // room for sign and base prefix
+
+      // Use uintToBuffer, faster than sprintf
+      valBufEnd = valBufBegin + uint64ToBufferUnsafe(uval, valBufBegin);
+      if (arg.thousandsSeparator) {
+        detail::insertThousandsGroupingUnsafe(valBufBegin, &valBufEnd);
+      }
+      break;
+    case 'c':
+      arg.enforce(!arg.basePrefix,
+                  "base prefix not allowed with '", presentation,
+                  "' specifier");
+      arg.enforce(!arg.thousandsSeparator,
+                  "thousands separator (',') not allowed with '",
+                  presentation, "' specifier");
+      valBufBegin = valBuf + 3;
+      *valBufBegin = static_cast<char>(uval);
+      valBufEnd = valBufBegin + 1;
+      break;
+    case 'o':
+    case 'O':
+      arg.enforce(!arg.thousandsSeparator,
+                  "thousands separator (',') not allowed with '",
+                  presentation, "' specifier");
+      valBufEnd = valBuf + valBufSize - 1;
+      valBufBegin = valBuf + detail::uintToOctal(valBuf, valBufSize - 1, uval);
+      if (arg.basePrefix) {
+        *--valBufBegin = '0';
+        prefixLen = 1;
+      }
+      break;
+    case 'x':
+      arg.enforce(!arg.thousandsSeparator,
+                  "thousands separator (',') not allowed with '",
+                  presentation, "' specifier");
+      valBufEnd = valBuf + valBufSize - 1;
+      valBufBegin = valBuf + detail::uintToHexLower(valBuf, valBufSize - 1,
+                                                    uval);
+      if (arg.basePrefix) {
+        *--valBufBegin = 'x';
+        *--valBufBegin = '0';
+        prefixLen = 2;
+      }
+      break;
+    case 'X':
+      arg.enforce(!arg.thousandsSeparator,
+                  "thousands separator (',') not allowed with '",
+                  presentation, "' specifier");
+      valBufEnd = valBuf + valBufSize - 1;
+      valBufBegin = valBuf + detail::uintToHexUpper(valBuf, valBufSize - 1,
+                                                    uval);
+      if (arg.basePrefix) {
+        *--valBufBegin = 'X';
+        *--valBufBegin = '0';
+        prefixLen = 2;
+      }
+      break;
+    case 'b':
+    case 'B':
+      arg.enforce(!arg.thousandsSeparator,
+                  "thousands separator (',') not allowed with '",
+                  presentation, "' specifier");
+      valBufEnd = valBuf + valBufSize - 1;
+      valBufBegin = valBuf + detail::uintToBinary(valBuf, valBufSize - 1,
+                                                  uval);
+      if (arg.basePrefix) {
+        *--valBufBegin = presentation;  // 0b or 0B
+        *--valBufBegin = '0';
+        prefixLen = 2;
+      }
+      break;
+    default:
+      arg.error("invalid specifier '", presentation, "'");
+    }
+
+    if (sign) {
+      *--valBufBegin = sign;
+      ++prefixLen;
+    }
+
+    format_value::formatNumber(StringPiece(valBufBegin, valBufEnd), prefixLen,
+                               arg, cb);
+  }
+
+ private:
+  T val_;
+};
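+
+// A few examples of the presentations handled above (via
+//  folly::format(...).str()):
+//   "{:d}"  with 42      -> "42"
+//   "{:#x}" with 255     -> "0xff"
+//   "{:#o}" with 8       -> "010"
+//   "{:#b}" with 5       -> "0b101"
+//   "{:,d}" with 1234567 -> "1,234,567"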
+
+// Bool
+template <>
+class FormatValue<bool> {
+ public:
+  explicit FormatValue(bool val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    if (arg.presentation == FormatArg::kDefaultPresentation) {
+      arg.validate(FormatArg::Type::OTHER);
+      format_value::formatString(val_ ? "true" : "false", arg, cb);
+    } else {  // number
+      FormatValue<int>(val_).format(arg, cb);
+    }
+  }
+
+ private:
+  bool val_;
+};
+
+// double
+template <>
+class FormatValue<double> {
+ public:
+  explicit FormatValue(double val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    fbstring piece;
+    int prefixLen;
+    formatHelper(piece, prefixLen, arg);
+    format_value::formatNumber(piece, prefixLen, arg, cb);
+  }
+
+ private:
+  void formatHelper(fbstring& piece, int& prefixLen, FormatArg& arg) const;
+
+  double val_;
+};
+
+// float (defer to double)
+template <>
+class FormatValue<float> {
+ public:
+  explicit FormatValue(float val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    FormatValue<double>(val_).format(arg, cb);
+  }
+
+ private:
+  float val_;
+};
+
+// String-y types (implicitly convertible to StringPiece, except char*)
+template <class T>
+class FormatValue<
+  T, typename std::enable_if<
+      (!std::is_pointer<T>::value ||
+       !std::is_same<char, typename std::decay<
+          typename std::remove_pointer<T>::type>::type>::value) &&
+      std::is_convertible<T, StringPiece>::value>::type>
+  {
+ public:
+  explicit FormatValue(StringPiece val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    if (arg.keyEmpty()) {
+      arg.validate(FormatArg::Type::OTHER);
+      arg.enforce(arg.presentation == FormatArg::kDefaultPresentation ||
+                  arg.presentation == 's',
+                  "invalid specifier '", arg.presentation, "'");
+      format_value::formatString(val_, arg, cb);
+    } else {
+      FormatValue<char>(val_.at(arg.splitIntKey())).format(arg, cb);
+    }
+  }
+
+ private:
+  StringPiece val_;
+};
+
+// Null
+template <>
+class FormatValue<std::nullptr_t> {
+ public:
+  explicit FormatValue(std::nullptr_t) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    arg.validate(FormatArg::Type::OTHER);
+    arg.enforce(arg.presentation == FormatArg::kDefaultPresentation,
+                "invalid specifier '", arg.presentation, "'");
+    format_value::formatString("(null)", arg, cb);
+  }
+};
+
+// Partial specialization of FormatValue for char*
+template <class T>
+class FormatValue<
+  T*,
+  typename std::enable_if<
+      std::is_same<char, typename std::decay<T>::type>::value>::type>
+  {
+ public:
+  explicit FormatValue(T* val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    if (arg.keyEmpty()) {
+      if (!val_) {
+        FormatValue<std::nullptr_t>(nullptr).format(arg, cb);
+      } else {
+        FormatValue<StringPiece>(val_).format(arg, cb);
+      }
+    } else {
+      FormatValue<typename std::decay<T>::type>(
+          val_[arg.splitIntKey()]).format(arg, cb);
+    }
+  }
+
+ private:
+  T* val_;
+};
+
+// Partial specialization of FormatValue for void*
+template <class T>
+class FormatValue<
+  T*,
+  typename std::enable_if<
+      std::is_same<void, typename std::decay<T>::type>::value>::type>
+  {
+ public:
+  explicit FormatValue(T* val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    if (!val_) {
+      FormatValue<std::nullptr_t>(nullptr).format(arg, cb);
+    } else {
+      // Print as a pointer, in hex.
+      arg.validate(FormatArg::Type::OTHER);
+      arg.enforce(arg.presentation == FormatArg::kDefaultPresentation,
+                  "invalid specifier '", arg.presentation, "'");
+      arg.basePrefix = true;
+      arg.presentation = 'x';
+      if (arg.align == FormatArg::Align::DEFAULT) {
+        arg.align = FormatArg::Align::LEFT;
+      }
+      FormatValue<uintptr_t>(
+          reinterpret_cast<uintptr_t>(val_)).doFormat(arg, cb);
+    }
+  }
+
+ private:
+  T* val_;
+};
+
+template <class T, class = void>
+class TryFormatValue {
+ public:
+  template <class FormatCallback>
+  static void formatOrFail(T& value, FormatArg& arg, FormatCallback& cb) {
+    arg.error("No formatter available for this type");
+  }
+};
+
+template <class T>
+class TryFormatValue<
+  T,
+  typename std::enable_if<
+      0 < sizeof(FormatValue<typename std::decay<T>::type>)>::type>
+  {
+ public:
+  template <class FormatCallback>
+  static void formatOrFail(T& value, FormatArg& arg, FormatCallback& cb) {
+    FormatValue<typename std::decay<T>::type>(value).format(arg, cb);
+  }
+};
+
+// Partial specialization of FormatValue for other pointers
+template <class T>
+class FormatValue<
+  T*,
+  typename std::enable_if<
+      !std::is_same<char, typename std::decay<T>::type>::value &&
+      !std::is_same<void, typename std::decay<T>::type>::value>::type>
+  {
+ public:
+  explicit FormatValue(T* val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    if (arg.keyEmpty()) {
+      FormatValue<void*>((void*)val_).format(arg, cb);
+    } else {
+      TryFormatValue<T>::formatOrFail(val_[arg.splitIntKey()], arg, cb);
+    }
+  }
+ private:
+  T* val_;
+};
+
+namespace detail {
+
+// std::array
+template <class T, size_t N>
+struct IndexableTraits<std::array<T, N>>
+  : public IndexableTraitsSeq<std::array<T, N>> {
+};
+
+// std::vector
+template <class T, class A>
+struct IndexableTraits<std::vector<T, A>>
+  : public IndexableTraitsSeq<std::vector<T, A>> {
+};
+
+// std::deque
+template <class T, class A>
+struct IndexableTraits<std::deque<T, A>>
+  : public IndexableTraitsSeq<std::deque<T, A>> {
+};
+
+// std::map with integral keys
+template <class K, class T, class C, class A>
+struct IndexableTraits<
+  std::map<K, T, C, A>,
+  typename std::enable_if<std::is_integral<K>::value>::type>
+  : public IndexableTraitsAssoc<std::map<K, T, C, A>> {
+};
+
+// std::unordered_map with integral keys
+template <class K, class T, class H, class E, class A>
+struct IndexableTraits<
+  std::unordered_map<K, T, H, E, A>,
+  typename std::enable_if<std::is_integral<K>::value>::type>
+  : public IndexableTraitsAssoc<std::unordered_map<K, T, H, E, A>> {
+};
+
+}  // namespace detail
+
+// Partial specialization of FormatValue for integer-indexable containers
+template <class T>
+class FormatValue<
+  T,
+  typename detail::IndexableTraits<T>::enabled> {
+ public:
+  explicit FormatValue(const T& val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    FormatValue<typename std::decay<
+      typename detail::IndexableTraits<T>::value_type>::type>(
+        detail::IndexableTraits<T>::at(
+            val_, arg.splitIntKey())).format(arg, cb);
+  }
+
+ private:
+  const T& val_;
+};
+
+template <class Container, class Value>
+class FormatValue<
+  detail::DefaultValueWrapper<Container, Value>,
+  typename detail::IndexableTraits<Container>::enabled> {
+ public:
+  explicit FormatValue(const detail::DefaultValueWrapper<Container, Value>& val)
+    : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    FormatValue<typename std::decay<
+      typename detail::IndexableTraits<Container>::value_type>::type>(
+          detail::IndexableTraits<Container>::at(
+              val_.container,
+              arg.splitIntKey(),
+              val_.defaultValue)).format(arg, cb);
+  }
+
+ private:
+  const detail::DefaultValueWrapper<Container, Value>& val_;
+};
+
+namespace detail {
+
+// Define enabled, key_type, and a convert() from StringPiece for the key
+// types that we support
+template <class T> struct KeyFromStringPiece;
+
+// std::string
+template <>
+struct KeyFromStringPiece<std::string> : public FormatTraitsBase {
+  typedef std::string key_type;
+  static std::string convert(StringPiece s) {
+    return s.toString();
+  }
+  typedef void enabled;
+};
+
+// fbstring
+template <>
+struct KeyFromStringPiece<fbstring> : public FormatTraitsBase {
+  typedef fbstring key_type;
+  static fbstring convert(StringPiece s) {
+    return s.toFbstring();
+  }
+};
+
+// StringPiece
+template <>
+struct KeyFromStringPiece<StringPiece> : public FormatTraitsBase {
+  typedef StringPiece key_type;
+  static StringPiece convert(StringPiece s) {
+    return s;
+  }
+};
+
+// Base class for associative types keyed by strings
+template <class T> struct KeyableTraitsAssoc : public FormatTraitsBase {
+  typedef typename T::key_type key_type;
+  typedef typename T::value_type::second_type value_type;
+  static const value_type& at(const T& map, StringPiece key) {
+    return map.at(KeyFromStringPiece<key_type>::convert(key));
+  }
+  static const value_type& at(const T& map, StringPiece key,
+                              const value_type& dflt) {
+    auto pos = map.find(KeyFromStringPiece<key_type>::convert(key));
+    return pos != map.end() ? pos->second : dflt;
+  }
+};
+
+// Define enabled, key_type, value_type, at() for supported string-keyed
+// types
+template <class T, class Enabled=void> struct KeyableTraits;
+
+// std::map with string key
+template <class K, class T, class C, class A>
+struct KeyableTraits<
+  std::map<K, T, C, A>,
+  typename KeyFromStringPiece<K>::enabled>
+  : public KeyableTraitsAssoc<std::map<K, T, C, A>> {
+};
+
+// std::unordered_map with string key
+template <class K, class T, class H, class E, class A>
+struct KeyableTraits<
+  std::unordered_map<K, T, H, E, A>,
+  typename KeyFromStringPiece<K>::enabled>
+  : public KeyableTraitsAssoc<std::unordered_map<K, T, H, E, A>> {
+};
+
+}  // namespace detail
+
+// Partial specialization of FormatValue for string-keyed containers
+template <class T>
+class FormatValue<
+  T,
+  typename detail::KeyableTraits<T>::enabled> {
+ public:
+  explicit FormatValue(const T& val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    FormatValue<typename std::decay<
+      typename detail::KeyableTraits<T>::value_type>::type>(
+        detail::KeyableTraits<T>::at(
+            val_, arg.splitKey())).format(arg, cb);
+  }
+
+ private:
+  const T& val_;
+};
+
+template <class Container, class Value>
+class FormatValue<
+  detail::DefaultValueWrapper<Container, Value>,
+  typename detail::KeyableTraits<Container>::enabled> {
+ public:
+  explicit FormatValue(const detail::DefaultValueWrapper<Container, Value>& val)
+    : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    FormatValue<typename std::decay<
+      typename detail::KeyableTraits<Container>::value_type>::type>(
+          detail::KeyableTraits<Container>::at(
+              val_.container,
+              arg.splitKey(),
+              val_.defaultValue)).format(arg, cb);
+  }
+
+ private:
+  const detail::DefaultValueWrapper<Container, Value>& val_;
+};
+
+// Partial specialization of FormatValue for pairs
+template <class A, class B>
+class FormatValue<std::pair<A, B>> {
+ public:
+  explicit FormatValue(const std::pair<A, B>& val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    int key = arg.splitIntKey();
+    switch (key) {
+    case 0:
+      FormatValue<typename std::decay<A>::type>(val_.first).format(arg, cb);
+      break;
+    case 1:
+      FormatValue<typename std::decay<B>::type>(val_.second).format(arg, cb);
+      break;
+    default:
+      arg.error("invalid index for pair");
+    }
+  }
+
+ private:
+  const std::pair<A, B>& val_;
+};
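+
+// Illustrative example: pairs are indexed positionally, so
+// sformat("{0[0]}:{0[1]}", std::make_pair(3, 4)) produces "3:4".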
+
+// Partial specialization of FormatValue for tuples
+template <class... Args>
+class FormatValue<std::tuple<Args...>> {
+  typedef std::tuple<Args...> Tuple;
+ public:
+  explicit FormatValue(const Tuple& val) : val_(val) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    int key = arg.splitIntKey();
+    arg.enforce(key >= 0, "tuple index must be non-negative");
+    doFormat(key, arg, cb);
+  }
+
+ private:
+  static constexpr size_t valueCount = std::tuple_size<Tuple>::value;
+
+  template <size_t K, class Callback>
+  typename std::enable_if<K == valueCount>::type
+  doFormatFrom(size_t i, FormatArg& arg, Callback& /*cb*/) const {
+    arg.error("tuple index out of range, max=", i);
+  }
+
+  template <size_t K, class Callback>
+  typename std::enable_if<(K < valueCount)>::type
+  doFormatFrom(size_t i, FormatArg& arg, Callback& cb) const {
+    if (i == K) {
+      FormatValue<typename std::decay<
+        typename std::tuple_element<K, Tuple>::type>::type>(
+          std::get<K>(val_)).format(arg, cb);
+    } else {
+      doFormatFrom<K+1>(i, arg, cb);
+    }
+  }
+
+  template <class Callback>
+  void doFormat(size_t i, FormatArg& arg, Callback& cb) const {
+    return doFormatFrom<0>(i, arg, cb);
+  }
+
+  const Tuple& val_;
+};
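+
+// Illustrative example: tuples use the same positional keys, e.g.
+// sformat("{0[2]}", std::make_tuple('a', "b", 42)) produces "42".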
+
+// Partial specialization of FormatValue for nested Formatters
+template <bool containerMode, class... Args,
+          template <bool, class...> class F>
+class FormatValue<F<containerMode, Args...>,
+                  typename std::enable_if<detail::IsFormatter<
+                      F<containerMode, Args...>>::value>::type> {
+  typedef typename F<containerMode, Args...>::BaseType FormatterValue;
+
+ public:
+  explicit FormatValue(const FormatterValue& f) : f_(f) { }
+
+  template <class FormatCallback>
+  void format(FormatArg& arg, FormatCallback& cb) const {
+    format_value::formatFormatter(f_, arg, cb);
+  }
+ private:
+  const FormatterValue& f_;
+};
+
+/**
+ * Formatter objects can be appended to strings, and therefore they're
+ * compatible with folly::toAppend and folly::to.
+ */
+template <class Tgt, class Derived, bool containerMode, class... Args>
+typename std::enable_if<IsSomeString<Tgt>::value>::type toAppend(
+    const BaseFormatter<Derived, containerMode, Args...>& value, Tgt* result) {
+  value.appendTo(*result);
+}
+
+}  // namespace folly
+
+#pragma GCC diagnostic pop
diff --git a/faux-folly/folly/Format.cpp b/faux-folly/folly/Format.cpp
new file mode 100644
index 0000000..da8f69c
--- /dev/null
+++ b/faux-folly/folly/Format.cpp
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/Format.h>
+
+#include <double-conversion/double-conversion.h>
+
+namespace folly {
+namespace detail {
+
+extern const FormatArg::Align formatAlignTable[];
+extern const FormatArg::Sign formatSignTable[];
+
+template <typename T>
+inline constexpr
+T bespoke_max(const T a, const T b)
+{
+    return (a > b) ? a : b;
+}
+
+template <typename T>
+inline constexpr
+T bespoke_max(const T a, const T b, const T c)
+{
+    return bespoke_max(bespoke_max(a, b), c);
+}
+
+}  // namespace detail
+
+using namespace folly::detail;
+
+void FormatValue<double>::formatHelper(
+    fbstring& piece, int& prefixLen, FormatArg& arg) const {
+  using ::double_conversion::DoubleToStringConverter;
+  using ::double_conversion::StringBuilder;
+
+  arg.validate(FormatArg::Type::FLOAT);
+
+  if (arg.presentation == FormatArg::kDefaultPresentation) {
+    arg.presentation = 'g';
+  }
+
+  const char* infinitySymbol = isupper(arg.presentation) ? "INF" : "inf";
+  const char* nanSymbol = isupper(arg.presentation) ? "NAN" : "nan";
+  char exponentSymbol = isupper(arg.presentation) ? 'E' : 'e';
+
+  if (arg.precision == FormatArg::kDefaultPrecision) {
+    arg.precision = 6;
+  }
+
+  // 2+: for null terminator and optional sign shenanigans.
+  constexpr unsigned buflen = 2 + bespoke_max(
+          (2 + DoubleToStringConverter::kMaxFixedDigitsBeforePoint +
+           DoubleToStringConverter::kMaxFixedDigitsAfterPoint),
+          (8 + DoubleToStringConverter::kMaxExponentialDigits),
+          (7 + DoubleToStringConverter::kMaxPrecisionDigits));
+  char buf[buflen];
+  StringBuilder builder(buf + 1, static_cast<int> (sizeof(buf) - 1));
+
+  char plusSign;
+  switch (arg.sign) {
+  case FormatArg::Sign::PLUS_OR_MINUS:
+    plusSign = '+';
+    break;
+  case FormatArg::Sign::SPACE_OR_MINUS:
+    plusSign = ' ';
+    break;
+  default:
+    plusSign = '\0';
+    break;
+  }
+
+  auto flags =
+      DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN |
+      (arg.trailingDot ? DoubleToStringConverter::EMIT_TRAILING_DECIMAL_POINT
+                       : 0);
+
+  double val = val_;
+  switch (arg.presentation) {
+  case '%':
+    val *= 100;
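+    // fall through: after scaling, '%' is formatted like fixed point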
+  case 'f':
+  case 'F':
+    {
+      if (arg.precision >
+          DoubleToStringConverter::kMaxFixedDigitsAfterPoint) {
+        arg.precision = DoubleToStringConverter::kMaxFixedDigitsAfterPoint;
+      }
+      DoubleToStringConverter conv(flags,
+                                   infinitySymbol,
+                                   nanSymbol,
+                                   exponentSymbol,
+                                   -4,
+                                   arg.precision,
+                                   0,
+                                   0);
+      arg.enforce(conv.ToFixed(val, arg.precision, &builder),
+                  "fixed double conversion failed");
+    }
+    break;
+  case 'e':
+  case 'E':
+    {
+      if (arg.precision > DoubleToStringConverter::kMaxExponentialDigits) {
+        arg.precision = DoubleToStringConverter::kMaxExponentialDigits;
+      }
+
+      DoubleToStringConverter conv(flags,
+                                   infinitySymbol,
+                                   nanSymbol,
+                                   exponentSymbol,
+                                   -4,
+                                   arg.precision,
+                                   0,
+                                   0);
+      arg.enforce(conv.ToExponential(val, arg.precision, &builder),
+                  "exponential double conversion failed");
+    }
+    break;
+  case 'n':  // should be locale-aware, but isn't
+  case 'g':
+  case 'G':
+    {
+      if (arg.precision < DoubleToStringConverter::kMinPrecisionDigits) {
+        arg.precision = DoubleToStringConverter::kMinPrecisionDigits;
+      } else if (arg.precision >
+                 DoubleToStringConverter::kMaxPrecisionDigits) {
+        arg.precision = DoubleToStringConverter::kMaxPrecisionDigits;
+      }
+      DoubleToStringConverter conv(flags,
+                                   infinitySymbol,
+                                   nanSymbol,
+                                   exponentSymbol,
+                                   -4,
+                                   arg.precision,
+                                   0,
+                                   0);
+      arg.enforce(conv.ToShortest(val, &builder),
+                  "shortest double conversion failed");
+    }
+    break;
+  default:
+    arg.error("invalid specifier '", arg.presentation, "'");
+  }
+
+  int len = builder.position();
+  builder.Finalize();
+  DCHECK_GT(len, 0);
+
+  // Add '+' or ' ' sign if needed
+  char* p = buf + 1;
+  // anything that's neither negative nor nan
+  prefixLen = 0;
+  if (plusSign && (*p != '-' && *p != 'n' && *p != 'N')) {
+    *--p = plusSign;
+    ++len;
+    prefixLen = 1;
+  } else if (*p == '-') {
+    prefixLen = 1;
+  }
+
+  piece = fbstring(p, len);
+}
+
+
+void FormatArg::initSlow() {
+  auto b = fullArgString.begin();
+  auto end = fullArgString.end();
+
+  // Parse key
+  auto p = static_cast<const char*>(memchr(b, ':', end - b));
+  if (!p) {
+    key_ = StringPiece(b, end);
+    return;
+  }
+  key_ = StringPiece(b, p);
+
+  if (*p == ':') {
+    // parse format spec
+    if (++p == end) return;
+
+    // fill/align, or just align
+    Align a;
+    if (p + 1 != end &&
+        (a = formatAlignTable[static_cast<unsigned char>(p[1])]) !=
+        Align::INVALID) {
+      fill = *p;
+      align = a;
+      p += 2;
+      if (p == end) return;
+    } else if ((a = formatAlignTable[static_cast<unsigned char>(*p)]) !=
+               Align::INVALID) {
+      align = a;
+      if (++p == end) return;
+    }
+
+    Sign s;
+    unsigned char uSign = static_cast<unsigned char>(*p);
+    if ((s = formatSignTable[uSign]) != Sign::INVALID) {
+      sign = s;
+      if (++p == end) return;
+    }
+
+    if (*p == '#') {
+      basePrefix = true;
+      if (++p == end) return;
+    }
+
+    if (*p == '0') {
+      enforce(align == Align::DEFAULT, "alignment specified twice");
+      fill = '0';
+      align = Align::PAD_AFTER_SIGN;
+      if (++p == end) return;
+    }
+
+    auto readInt = [&] {
+      auto const b = p;
+      do {
+        ++p;
+      } while (p != end && *p >= '0' && *p <= '9');
+      return to<int>(StringPiece(b, p));
+    };
+
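+    // '*' requests a dynamic field width: the width is taken from a format
+    // argument, optionally selected by an explicit index (e.g. "*2").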
+    if (*p == '*') {
+      width = kDynamicWidth;
+      ++p;
+
+      if (p == end) return;
+
+      if (*p >= '0' && *p <= '9') widthIndex = readInt();
+
+      if (p == end) return;
+    } else if (*p >= '0' && *p <= '9') {
+      width = readInt();
+
+      if (p == end) return;
+    }
+
+    if (*p == ',') {
+      thousandsSeparator = true;
+      if (++p == end) return;
+    }
+
+    if (*p == '.') {
+      auto b = ++p;
+      while (p != end && *p >= '0' && *p <= '9') {
+        ++p;
+      }
+      if (p != b) {
+        precision = to<int>(StringPiece(b, p));
+        if (p != end && *p == '.') {
+          trailingDot = true;
+          ++p;
+        }
+      } else {
+        trailingDot = true;
+      }
+
+      if (p == end) return;
+    }
+
+    presentation = *p;
+    if (++p == end) return;
+  }
+
+  error("extra characters in format string");
+}
+
+void FormatArg::validate(Type type) const {
+  enforce(keyEmpty(), "index not allowed");
+  switch (type) {
+  case Type::INTEGER:
+    enforce(precision == kDefaultPrecision,
+            "precision not allowed on integers");
+    break;
+  case Type::FLOAT:
+    enforce(!basePrefix,
+            "base prefix ('#') specifier only allowed on integers");
+    enforce(!thousandsSeparator,
+            "thousands separator (',') only allowed on integers");
+    break;
+  case Type::OTHER:
+    enforce(align != Align::PAD_AFTER_SIGN,
+            "'='alignment only allowed on numbers");
+    enforce(sign == Sign::DEFAULT,
+            "sign specifier only allowed on numbers");
+    enforce(!basePrefix,
+            "base prefix ('#') specifier only allowed on integers");
+    enforce(!thousandsSeparator,
+            "thousands separator (',') only allowed on integers");
+    break;
+  }
+}
+
+namespace detail {
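+// Rewrites a digit string in place, inserting a ',' before each group of
+// three digits counted from the right.  Illustrative example: a buffer
+// holding "1234567" (with room for the separators) becomes "1,234,567",
+// and *end_buffer is advanced past the two inserted commas.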
+void insertThousandsGroupingUnsafe(char* start_buffer, char** end_buffer) {
+  uint32_t remaining_digits = *end_buffer - start_buffer;
+  uint32_t separator_size = (remaining_digits - 1) / 3;
+  uint32_t result_size = remaining_digits + separator_size;
+  *end_buffer = *end_buffer + separator_size;
+
+  // get the end of the new string with the separators
+  uint32_t buffer_write_index = result_size - 1;
+  uint32_t buffer_read_index = remaining_digits - 1;
+  start_buffer[buffer_write_index + 1] = 0;
+
+  bool done = false;
+  uint32_t next_group_size = 3;
+
+  while (!done) {
+    uint32_t current_group_size = std::max<uint32_t>(1,
+      std::min<uint32_t>(remaining_digits, next_group_size));
+
+    // write out the current group's digits to the buffer index
+    for (uint32_t i = 0; i < current_group_size; i++) {
+      start_buffer[buffer_write_index--] = start_buffer[buffer_read_index--];
+    }
+
+    // if the write index hasn't wrapped past zero (unsigned overflow makes
+    // buffer_write_index + 1 == 0), write the separator before the next group
+    if (buffer_write_index < buffer_write_index + 1) {
+      start_buffer[buffer_write_index--] = ',';
+    } else {
+      done = true;
+    }
+
+    remaining_digits -= current_group_size;
+  }
+}
+} // detail
+
+}  // namespace folly
diff --git a/faux-folly/folly/Format.h b/faux-folly/folly/Format.h
new file mode 100644
index 0000000..e3134a3
--- /dev/null
+++ b/faux-folly/folly/Format.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_FORMAT_H_
+#define FOLLY_FORMAT_H_
+
+#include <cstdio>
+#include <tuple>
+#include <type_traits>
+
+#include <folly/Conv.h>
+#include <folly/Range.h>
+#include <folly/Traits.h>
+#include <folly/String.h>
+#include <folly/FormatArg.h>
+
+// Ignore shadowing warnings within this file, so includers can use -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+
+namespace folly {
+
+// forward declarations
+template <bool containerMode, class... Args> class Formatter;
+template <class... Args>
+Formatter<false, Args...> format(StringPiece fmt, Args&&... args);
+template <class C>
+Formatter<true, C> vformat(StringPiece fmt, C&& container);
+template <class T, class Enable=void> class FormatValue;
+
+// meta-attribute to identify formatters in this sea of template weirdness
+namespace detail {
+class FormatterTag {};
+}  // namespace detail
+
+/**
+ * Formatter class.
+ *
+ * Note that this class is tricky, as it keeps *references* to its arguments
+ * (and doesn't copy the passed-in format string).  Thankfully, you can't use
+ * this directly, you have to use format(...) below.
+ */
+
+/* BaseFormatter class. Currently, the only behavior that can be
+ * overridden is the actual formatting of positional parameters in
+ * `doFormatArg`. The Formatter class provides the default implementation.
+ */
+template <class Derived, bool containerMode, class... Args>
+class BaseFormatter {
+ public:
+  /**
+   * Append to output.  out(StringPiece sp) may be called (more than once)
+   */
+  template <class Output>
+  void operator()(Output& out) const;
+
+  /**
+   * Append to a string.
+   */
+  template <class Str>
+  typename std::enable_if<IsSomeString<Str>::value>::type
+  appendTo(Str& str) const {
+    auto appender = [&str] (StringPiece s) { str.append(s.data(), s.size()); };
+    (*this)(appender);
+  }
+
+  /**
+   * Conversion to string
+   */
+  std::string str() const {
+    std::string s;
+    appendTo(s);
+    return s;
+  }
+
+  /**
+   * Conversion to fbstring
+   */
+  fbstring fbstr() const {
+    fbstring s;
+    appendTo(s);
+    return s;
+  }
+
+  /**
+   * metadata to identify generated children of BaseFormatter
+   */
+  typedef detail::FormatterTag IsFormatter;
+  typedef BaseFormatter BaseType;
+
+ private:
+  typedef std::tuple<FormatValue<
+      typename std::decay<Args>::type>...> ValueTuple;
+  static constexpr size_t valueCount = std::tuple_size<ValueTuple>::value;
+
+  template <size_t K, class Callback>
+  typename std::enable_if<K == valueCount>::type
+  doFormatFrom(size_t i, FormatArg& arg, Callback& /*cb*/) const {
+    arg.error("argument index out of range, max=", i);
+  }
+
+  template <size_t K, class Callback>
+  typename std::enable_if<(K < valueCount)>::type
+  doFormatFrom(size_t i, FormatArg& arg, Callback& cb) const {
+    if (i == K) {
+      static_cast<const Derived*>(this)->template doFormatArg<K>(arg, cb);
+    } else {
+      doFormatFrom<K+1>(i, arg, cb);
+    }
+  }
+
+  template <class Callback>
+  void doFormat(size_t i, FormatArg& arg, Callback& cb) const {
+    return doFormatFrom<0>(i, arg, cb);
+  }
+
+  template <size_t K>
+  typename std::enable_if<K == valueCount, int>::type
+  getSizeArgFrom(size_t i, const FormatArg& arg) const {
+    arg.error("argument index out of range, max=", i);
+  }
+
+  template <class T>
+  typename std::enable_if<std::is_integral<T>::value &&
+                          !std::is_same<T, bool>::value, int>::type
+  getValue(const FormatValue<T>& format, const FormatArg&) const {
+    return static_cast<int>(format.getValue());
+  }
+
+  template <class T>
+  typename std::enable_if<!std::is_integral<T>::value ||
+                          std::is_same<T, bool>::value, int>::type
+  getValue(const FormatValue<T>&, const FormatArg& arg) const {
+    arg.error("dynamic field width argument must be integral");
+  }
+
+  template <size_t K>
+  typename std::enable_if<K < valueCount, int>::type
+  getSizeArgFrom(size_t i, const FormatArg& arg) const {
+    if (i == K) {
+      return getValue(std::get<K>(values_), arg);
+    }
+    return getSizeArgFrom<K+1>(i, arg);
+  }
+
+  int getSizeArg(size_t i, const FormatArg& arg) const {
+    return getSizeArgFrom<0>(i, arg);
+  }
+
+  StringPiece str_;
+
+ protected:
+  explicit BaseFormatter(StringPiece str, Args&&... args);
+
+  // Not copyable
+  BaseFormatter(const BaseFormatter&) = delete;
+  BaseFormatter& operator=(const BaseFormatter&) = delete;
+
+  // Movable, but the move constructor and assignment operator are private,
+  // for the exclusive use of format() (below).  This way, you can't create
+  // a Formatter object, but can handle references to it (for streaming,
+  // conversion to string, etc) -- which is good, as Formatter objects are
+  // dangerous (they hold references, possibly to temporaries)
+  BaseFormatter(BaseFormatter&&) = default;
+  BaseFormatter& operator=(BaseFormatter&&) = default;
+
+  ValueTuple values_;
+};
+
+template <bool containerMode, class... Args>
+class Formatter : public BaseFormatter<Formatter<containerMode, Args...>,
+                                       containerMode,
+                                       Args...> {
+ private:
+  explicit Formatter(StringPiece& str, Args&&... args)
+      : BaseFormatter<Formatter<containerMode, Args...>,
+                      containerMode,
+                      Args...>(str, std::forward<Args>(args)...) {}
+
+  template <size_t K, class Callback>
+  void doFormatArg(FormatArg& arg, Callback& cb) const {
+    std::get<K>(this->values_).format(arg, cb);
+  }
+
+  friend class BaseFormatter<Formatter<containerMode, Args...>,
+                             containerMode,
+                             Args...>;
+
+  template <class... A>
+  friend Formatter<false, A...> format(StringPiece fmt, A&&... arg);
+  template <class C>
+  friend Formatter<true, C> vformat(StringPiece fmt, C&& container);
+};
+
+/**
+ * Formatter objects can be written to streams.
+ */
+template<bool containerMode, class... Args>
+std::ostream& operator<<(std::ostream& out,
+                         const Formatter<containerMode, Args...>& formatter) {
+  auto writer = [&out] (StringPiece sp) { out.write(sp.data(), sp.size()); };
+  formatter(writer);
+  return out;
+}
+
+/**
+ * Formatter objects can be written to stdio FILEs.
+ */
+template <class Derived, bool containerMode, class... Args>
+void writeTo(FILE* fp,
+             const BaseFormatter<Derived, containerMode, Args...>& formatter);
+
+/**
+ * Create a formatter object.
+ *
+ * std::string formatted = format("{} {}", 23, 42).str();
+ * LOG(INFO) << format("{} {}", 23, 42);
+ * writeTo(stdout, format("{} {}", 23, 42));
+ */
+template <class... Args>
+Formatter<false, Args...> format(StringPiece fmt, Args&&... args) {
+  return Formatter<false, Args...>(
+      fmt, std::forward<Args>(args)...);
+}
+
+/**
+ * Like format(), but immediately returns the formatted string instead of an
+ * intermediate format object.
+ */
+template <class... Args>
+inline std::string sformat(StringPiece fmt, Args&&... args) {
+  return format(fmt, std::forward<Args>(args)...).str();
+}
+
+/**
+ * Create a formatter object that takes one argument (of container type)
+ * and uses that container to get argument values from.
+ *
+ * std::map<string, string> map { {"hello", "world"}, {"answer", "42"} };
+ *
+ * The following are equivalent:
+ *   format("{0[hello]} {0[answer]}", map);
+ * and
+ *   vformat("{hello} {answer}", map);
+ * but the latter is cleaner.
+ */
+template <class Container>
+Formatter<true, Container> vformat(StringPiece fmt, Container&& container) {
+  return Formatter<true, Container>(
+      fmt, std::forward<Container>(container));
+}
+
+/**
+ * Like vformat(), but immediately returns the formatted string instead of an
+ * intermediate format object.
+ */
+template <class Container>
+inline std::string svformat(StringPiece fmt, Container&& container) {
+  return vformat(fmt, std::forward<Container>(container)).str();
+}
+
+/**
+ * Wrap a sequence or associative container so that out-of-range lookups
+ * return a default value rather than throwing an exception.
+ *
+ * Usage:
+ * format("[no_such_key"], defaulted(map, 42))  -> 42
+ */
+namespace detail {
+template <class Container, class Value> struct DefaultValueWrapper {
+  DefaultValueWrapper(const Container& container, const Value& defaultValue)
+    : container(container),
+      defaultValue(defaultValue) {
+  }
+
+  const Container& container;
+  const Value& defaultValue;
+};
+}  // namespace detail
+
+template <class Container, class Value>
+detail::DefaultValueWrapper<Container, Value>
+defaulted(const Container& c, const Value& v) {
+  return detail::DefaultValueWrapper<Container, Value>(c, v);
+}
+
+/**
+ * Append formatted output to a string.
+ *
+ * std::string foo;
+ * format(&foo, "{} {}", 42, 23);
+ *
+ * Shortcut for toAppend(format(...), &foo);
+ */
+template <class Str, class... Args>
+typename std::enable_if<IsSomeString<Str>::value>::type
+format(Str* out, StringPiece fmt, Args&&... args) {
+  format(fmt, std::forward<Args>(args)...).appendTo(*out);
+}
+
+/**
+ * Append vformatted output to a string.
+ */
+template <class Str, class Container>
+typename std::enable_if<IsSomeString<Str>::value>::type
+vformat(Str* out, StringPiece fmt, Container&& container) {
+  vformat(fmt, std::forward<Container>(container)).appendTo(*out);
+}
+
+/**
+ * Utilities for all format value specializations.
+ */
+namespace format_value {
+
+/**
+ * Format a string in "val", obeying appropriate alignment, padding, width,
+ * and precision.  Treats Align::DEFAULT as Align::LEFT, and
+ * Align::PAD_AFTER_SIGN as Align::RIGHT; use formatNumber for
+ * number-specific formatting.
+ */
+template <class FormatCallback>
+void formatString(StringPiece val, FormatArg& arg, FormatCallback& cb);
+
+/**
+ * Format a number in "val"; the first prefixLen characters form the prefix
+ * (sign, "0x" base prefix, etc) which must be left-aligned if the alignment
+ * is Align::PAD_AFTER_SIGN.  Treats Align::DEFAULT as Align::LEFT.  Ignores
+ * arg.precision, as that has a different meaning for numbers (not "maximum
+ * field width")
+ */
+template <class FormatCallback>
+void formatNumber(StringPiece val, int prefixLen, FormatArg& arg,
+                  FormatCallback& cb);
+
+
+/**
+ * Format a Formatter object recursively.  Behaves just like
+ * formatString(fmt.str(), arg, cb); but avoids creating a temporary
+ * string if possible.
+ */
+template <class FormatCallback,
+          class Derived,
+          bool containerMode,
+          class... Args>
+void formatFormatter(
+    const BaseFormatter<Derived, containerMode, Args...>& formatter,
+    FormatArg& arg,
+    FormatCallback& cb);
+
+}  // namespace format_value
+
+/*
+ * Specialize folly::FormatValue for your type.
+ *
+ * FormatValue<T> is constructed with a (reference-collapsed) T&&, which is
+ * guaranteed to stay alive until the FormatValue object is destroyed, so you
+ * may keep a reference (or pointer) to it instead of making a copy.
+ *
+ * You must define
+ *   template <class Callback>
+ *   void format(FormatArg& arg, Callback& cb) const;
+ * with the following semantics: format the value using the given argument.
+ *
+ * arg is given by non-const reference for convenience -- it won't be reused,
+ * so feel free to modify it in place if necessary.  (For example, wrap an
+ * existing conversion but change the default, or remove the "key" when
+ * extracting an element from a container)
+ *
+ * Call the callback to append data to the output.  You may call the callback
+ * as many times as you'd like (or not at all, if you want to output an
+ * empty string)
+ */
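+/*
+ * A minimal sketch of such a specialization (illustrative only; "Widget"
+ * is a hypothetical user type, not part of folly):
+ *
+ *   namespace folly {
+ *   template <>
+ *   class FormatValue<Widget> {
+ *    public:
+ *     explicit FormatValue(const Widget& w) : w_(w) { }
+ *     template <class Callback>
+ *     void format(FormatArg& arg, Callback& cb) const {
+ *       // delegate to the string formatter for the widget's name
+ *       FormatValue<std::string>(w_.name()).format(arg, cb);
+ *     }
+ *    private:
+ *     const Widget& w_;
+ *   };
+ *   }  // namespace folly
+ */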
+
+namespace detail {
+
+template <class T, class Enable = void>
+struct IsFormatter : public std::false_type {};
+
+template <class T>
+struct IsFormatter<
+    T,
+    typename std::enable_if<
+        std::is_same<typename T::IsFormatter, detail::FormatterTag>::value>::
+        type> : public std::true_type {};
+} // folly::detail
+
+// Deprecated API. formatChecked() et. al. now behave identically to their
+// non-Checked counterparts.
+template <class... Args>
+Formatter<false, Args...> formatChecked(StringPiece fmt, Args&&... args) {
+  return format(fmt, std::forward<Args>(args)...);
+}
+template <class... Args>
+inline std::string sformatChecked(StringPiece fmt, Args&&... args) {
+  return formatChecked(fmt, std::forward<Args>(args)...).str();
+}
+template <class Container>
+Formatter<true, Container> vformatChecked(StringPiece fmt,
+                                          Container&& container) {
+  return vformat(fmt, std::forward<Container>(container));
+}
+template <class Container>
+inline std::string svformatChecked(StringPiece fmt, Container&& container) {
+  return vformatChecked(fmt, std::forward<Container>(container)).str();
+}
+template <class Str, class... Args>
+typename std::enable_if<IsSomeString<Str>::value>::type
+formatChecked(Str* out, StringPiece fmt, Args&&... args) {
+  formatChecked(fmt, std::forward<Args>(args)...).appendTo(*out);
+}
+template <class Str, class Container>
+typename std::enable_if<IsSomeString<Str>::value>::type
+vformatChecked(Str* out, StringPiece fmt, Container&& container) {
+  vformatChecked(fmt, std::forward<Container>(container)).appendTo(*out);
+}
+
+}  // namespace folly
+
+#include <folly/Format-inl.h>
+
+#pragma GCC diagnostic pop
+
+#endif /* FOLLY_FORMAT_H_ */
diff --git a/faux-folly/folly/FormatArg.h b/faux-folly/folly/FormatArg.h
new file mode 100644
index 0000000..c4d9473
--- /dev/null
+++ b/faux-folly/folly/FormatArg.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_FORMATARG_H_
+#define FOLLY_FORMATARG_H_
+
+#include <stdexcept>
+#include <folly/Conv.h>
+#include <folly/Likely.h>
+#include <folly/Portability.h>
+#include <folly/Range.h>
+
+namespace folly {
+
+class BadFormatArg : public std::invalid_argument {
+ public:
+  explicit BadFormatArg(const std::string& msg)
+    : std::invalid_argument(msg) {}
+};
+
+/**
+ * Parsed format argument.
+ */
+struct FormatArg {
+  /**
+   * Parse a format argument from a string.  Keeps a reference to the
+   * passed-in string -- does not copy the given characters.
+   */
+  explicit FormatArg(StringPiece sp)
+    : fullArgString(sp),
+      fill(kDefaultFill),
+      align(Align::DEFAULT),
+      sign(Sign::DEFAULT),
+      basePrefix(false),
+      thousandsSeparator(false),
+      trailingDot(false),
+      width(kDefaultWidth),
+      widthIndex(kNoIndex),
+      precision(kDefaultPrecision),
+      presentation(kDefaultPresentation),
+      nextKeyMode_(NextKeyMode::NONE) {
+    if (!sp.empty()) {
+      initSlow();
+    }
+  }
+
+  enum class Type {
+    INTEGER,
+    FLOAT,
+    OTHER
+  };
+  /**
+   * Validate the argument for the given type; throws on error.
+   */
+  void validate(Type type) const;
+
+  /**
+   * Throw an exception if the first argument is false.  The exception
+   * message will contain the argument string as well as any passed-in
+   * arguments to enforce, formatted using folly::to<std::string>.
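+   *
+   * For example, enforce(precision >= 0, "negative precision ", precision)
+   * would throw BadFormatArg when precision is negative.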
+   */
+  template <typename... Args>
+  void enforce(bool v, Args&&... args) const {
+    if (UNLIKELY(!v)) {
+      error(std::forward<Args>(args)...);
+    }
+  }
+
+  template <typename... Args>
+  std::string errorStr(Args&&... args) const;
+  template <typename... Args>
+  FOLLY_NORETURN void error(Args&&... args) const;
+
+  /**
+   * Full argument string, as passed in to the constructor.
+   */
+  StringPiece fullArgString;
+
+  /**
+   * Fill
+   */
+  static constexpr char kDefaultFill = '\0';
+  char fill;
+
+  /**
+   * Alignment
+   */
+  enum class Align : uint8_t {
+    DEFAULT,
+    LEFT,
+    RIGHT,
+    PAD_AFTER_SIGN,
+    CENTER,
+    INVALID
+  };
+  Align align;
+
+  /**
+   * Sign
+   */
+  enum class Sign : uint8_t {
+    DEFAULT,
+    PLUS_OR_MINUS,
+    MINUS,
+    SPACE_OR_MINUS,
+    INVALID
+  };
+  Sign sign;
+
+  /**
+   * Output base prefix (0 for octal, 0x for hex)
+   */
+  bool basePrefix;
+
+  /**
+   * Output thousands separator (comma)
+   */
+  bool thousandsSeparator;
+
+  /**
+   * Force a trailing decimal on doubles which could be rendered as ints
+   */
+  bool trailingDot;
+
+  /**
+   * Field width and optional argument index
+   */
+  static constexpr int kDefaultWidth = -1;
+  static constexpr int kDynamicWidth = -2;
+  static constexpr int kNoIndex = -1;
+  int width;
+  int widthIndex;
+
+  /**
+   * Precision
+   */
+  static constexpr int kDefaultPrecision = -1;
+  int precision;
+
+  /**
+   * Presentation
+   */
+  static constexpr char kDefaultPresentation = '\0';
+  char presentation;
+
+  /**
+   * Split a key component from "key", which must be non-empty (an exception
+   * is thrown otherwise).
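+   * For example, successive calls on a key of "a.b.c" yield "a", then "b",
+   * then "c".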
+   */
+  template <bool emptyOk=false>
+  StringPiece splitKey();
+
+  /**
+   * Is the entire key empty?
+   */
+  bool keyEmpty() const {
+    return nextKeyMode_ == NextKeyMode::NONE && key_.empty();
+  }
+
+  /**
+   * Split a key component from "key", which must be non-empty and a valid
+   * integer (an exception is thrown otherwise).
+   */
+  int splitIntKey();
+
+  void setNextIntKey(int val) {
+    assert(nextKeyMode_ == NextKeyMode::NONE);
+    nextKeyMode_ = NextKeyMode::INT;
+    nextIntKey_ = val;
+  }
+
+  void setNextKey(StringPiece val) {
+    assert(nextKeyMode_ == NextKeyMode::NONE);
+    nextKeyMode_ = NextKeyMode::STRING;
+    nextKey_ = val;
+  }
+
+ private:
+  void initSlow();
+  template <bool emptyOk>
+  StringPiece doSplitKey();
+
+  StringPiece key_;
+  int nextIntKey_;
+  StringPiece nextKey_;
+  enum class NextKeyMode {
+    NONE,
+    INT,
+    STRING,
+  };
+  NextKeyMode nextKeyMode_;
+};
+
+template <typename... Args>
+inline std::string FormatArg::errorStr(Args&&... args) const {
+  return to<std::string>(
+    "invalid format argument {", fullArgString, "}: ",
+    std::forward<Args>(args)...);
+}
+
+template <typename... Args>
+inline void FormatArg::error(Args&&... args) const {
+  throw BadFormatArg(errorStr(std::forward<Args>(args)...));
+}
+
+template <bool emptyOk>
+inline StringPiece FormatArg::splitKey() {
+  enforce(nextKeyMode_ != NextKeyMode::INT, "integer key expected");
+  return doSplitKey<emptyOk>();
+}
+
+template <bool emptyOk>
+inline StringPiece FormatArg::doSplitKey() {
+  if (nextKeyMode_ == NextKeyMode::STRING) {
+    nextKeyMode_ = NextKeyMode::NONE;
+    if (!emptyOk) {  // static
+      enforce(!nextKey_.empty(), "non-empty key required");
+    }
+    return nextKey_;
+  }
+
+  if (key_.empty()) {
+    if (!emptyOk) {  // static
+      error("non-empty key required");
+    }
+    return StringPiece();
+  }
+
+  const char* b = key_.begin();
+  const char* e = key_.end();
+  const char* p;
+  if (e[-1] == ']') {
+    --e;
+    p = static_cast<const char*>(memchr(b, '[', e - b));
+    enforce(p, "unmatched ']'");
+  } else {
+    p = static_cast<const char*>(memchr(b, '.', e - b));
+  }
+  if (p) {
+    key_.assign(p + 1, e);
+  } else {
+    p = e;
+    key_.clear();
+  }
+  if (!emptyOk) {  // static
+    enforce(b != p, "non-empty key required");
+  }
+  return StringPiece(b, p);
+}
+
+inline int FormatArg::splitIntKey() {
+  if (nextKeyMode_ == NextKeyMode::INT) {
+    nextKeyMode_ = NextKeyMode::NONE;
+    return nextIntKey_;
+  }
+  try {
+    return to<int>(doSplitKey<true>());
+  } catch (const std::out_of_range& e) {
+    error("integer key required");
+    return 0;  // unreached
+  }
+}
+
+}  // namespace folly
+
+#endif /* FOLLY_FORMATARG_H_ */
diff --git a/faux-folly/folly/FormatTraits.h b/faux-folly/folly/FormatTraits.h
new file mode 100644
index 0000000..7ca2ae5
--- /dev/null
+++ b/faux-folly/folly/FormatTraits.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_FORMAT_TRAITS_H_
+#define FOLLY_FORMAT_TRAITS_H_
+
+#include <type_traits>
+
+namespace folly { namespace detail {
+
+// Shortcut, so we don't have to use enable_if everywhere
+struct FormatTraitsBase {
+  typedef void enabled;
+};
+
+// Traits that define enabled, value_type, and at() for anything
+// indexable with integral keys: pointers, arrays, vectors, and maps
+// with integral keys
+template <class T, class Enable = void> struct IndexableTraits;
+
+// Base class for sequences (vectors, deques)
+template <class C>
+struct IndexableTraitsSeq : public FormatTraitsBase {
+  typedef C container_type;
+  typedef typename C::value_type value_type;
+
+  static const value_type& at(const C& c, int idx) {
+    return c.at(idx);
+  }
+
+  static const value_type& at(const C& c, int idx, const value_type& dflt) {
+    return (idx >= 0 && size_t(idx) < c.size()) ? c.at(idx) : dflt;
+  }
+};
+
+// Base class for associative types (maps)
+template <class C>
+struct IndexableTraitsAssoc : public FormatTraitsBase {
+  typedef typename C::value_type::second_type value_type;
+
+  static const value_type& at(const C& c, int idx) {
+    return c.at(static_cast<typename C::key_type>(idx));
+  }
+
+  static const value_type& at(const C& c, int idx, const value_type& dflt) {
+    auto pos = c.find(static_cast<typename C::key_type>(idx));
+    return pos != c.end() ? pos->second : dflt;
+  }
+};
+
+}}  // namespaces
+
+#endif /* FOLLY_FORMAT_TRAITS_H_ */
diff --git a/faux-folly/folly/Hash.h b/faux-folly/folly/Hash.h
new file mode 100644
index 0000000..99d7dae
--- /dev/null
+++ b/faux-folly/folly/Hash.h
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_BASE_HASH_H_
+#define FOLLY_BASE_HASH_H_
+
+#include <cstring>
+#include <stdint.h>
+#include <string>
+#include <utility>
+#include <tuple>
+
+#include <folly/ApplyTuple.h>
+#include <folly/SpookyHashV1.h>
+#include <folly/SpookyHashV2.h>
+
+/*
+ * Various hashing functions.
+ */
+
+namespace folly { namespace hash {
+
+// This is a general-purpose way to create a single hash from multiple
+// hashable objects. hash_combine_generic takes a class Hasher implementing
+// hash<T>; hash_combine uses a default hasher StdHasher that uses std::hash.
+// hash_combine_generic hashes each argument and combines those hashes in
+// an order-dependent way to yield a new hash.
+
+
+// This is the Hash128to64 function from Google's cityhash (available
+// under the MIT License).  We use it to reduce multiple 64 bit hashes
+// into a single hash.
+inline uint64_t hash_128_to_64(const uint64_t upper, const uint64_t lower) {
+  // Murmur-inspired hashing.
+  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+  uint64_t a = (lower ^ upper) * kMul;
+  a ^= (a >> 47);
+  uint64_t b = (upper ^ a) * kMul;
+  b ^= (b >> 47);
+  b *= kMul;
+  return b;
+}
+
+// Never used, but gcc demands it.
+template <class Hasher>
+inline size_t hash_combine_generic() {
+  return 0;
+}
+
+template <
+    class Iter,
+    class Hash = std::hash<typename std::iterator_traits<Iter>::value_type>>
+uint64_t hash_range(Iter begin,
+                    Iter end,
+                    uint64_t hash = 0,
+                    Hash hasher = Hash()) {
+  for (; begin != end; ++begin) {
+    hash = hash_128_to_64(hash, hasher(*begin));
+  }
+  return hash;
+}
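+
+// Illustrative usage, hashing a whole sequence:
+//   std::vector<int> v{1, 2, 3};
+//   uint64_t h = folly::hash::hash_range(v.begin(), v.end());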
+
+template <class Hasher, typename T, typename... Ts>
+size_t hash_combine_generic(const T& t, const Ts&... ts) {
+  size_t seed = Hasher::hash(t);
+  if (sizeof...(ts) == 0) {
+    return seed;
+  }
+  size_t remainder = hash_combine_generic<Hasher>(ts...);
+  return hash_128_to_64(seed, remainder);
+}
+
+// Simply uses std::hash to hash.  Note that std::hash is not guaranteed
+// to be a very good hash function; provided std::hash doesn't collide on
+// the individual inputs, you are fine, but that won't be true for, say,
+// strings or pairs
+class StdHasher {
+ public:
+  template <typename T>
+  static size_t hash(const T& t) {
+    return std::hash<T>()(t);
+  }
+};
+
+template <typename T, typename... Ts>
+size_t hash_combine(const T& t, const Ts&... ts) {
+  return hash_combine_generic<StdHasher>(t, ts...);
+}
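+
+// Illustrative usage, combining several values into one order-dependent hash:
+//   size_t h = folly::hash::hash_combine(std::string("key"), 42, 3.14);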
+
+//////////////////////////////////////////////////////////////////////
+
+/*
+ * Thomas Wang 64 bit mix hash function
+ */
+
+inline uint64_t twang_mix64(uint64_t key) {
+  key = (~key) + (key << 21);  // key *= (1 << 21) - 1; key -= 1;
+  key = key ^ (key >> 24);
+  key = key + (key << 3) + (key << 8);  // key *= 1 + (1 << 3) + (1 << 8)
+  key = key ^ (key >> 14);
+  key = key + (key << 2) + (key << 4);  // key *= 1 + (1 << 2) + (1 << 4)
+  key = key ^ (key >> 28);
+  key = key + (key << 31);  // key *= 1 + (1 << 31)
+  return key;
+}
+
+/*
+ * Inverse of twang_mix64
+ *
+ * Note that twang_unmix64 is significantly slower than twang_mix64.
+ */
+
+inline uint64_t twang_unmix64(uint64_t key) {
+  // See the comments in jenkins_rev_unmix32 for an explanation as to how this
+  // was generated
+  key *= 4611686016279904257U;
+  key ^= (key >> 28) ^ (key >> 56);
+  key *= 14933078535860113213U;
+  key ^= (key >> 14) ^ (key >> 28) ^ (key >> 42) ^ (key >> 56);
+  key *= 15244667743933553977U;
+  key ^= (key >> 24) ^ (key >> 48);
+  key = (key + 1) * 9223367638806167551U;
+  return key;
+}
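+
+// Illustrative property: twang_unmix64 inverts twang_mix64 exactly, so
+// twang_unmix64(twang_mix64(x)) == x for any uint64_t x.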
+
+/*
+ * Thomas Wang downscaling hash function
+ */
+
+inline uint32_t twang_32from64(uint64_t key) {
+  key = (~key) + (key << 18);
+  key = key ^ (key >> 31);
+  key = key * 21;
+  key = key ^ (key >> 11);
+  key = key + (key << 6);
+  key = key ^ (key >> 22);
+  return (uint32_t) key;
+}
+
+/*
+ * Robert Jenkins' reversible 32 bit mix hash function
+ */
+
+inline uint32_t jenkins_rev_mix32(uint32_t key) {
+  key += (key << 12);  // key *= (1 + (1 << 12))
+  key ^= (key >> 22);
+  key += (key << 4);   // key *= (1 + (1 << 4))
+  key ^= (key >> 9);
+  key += (key << 10);  // key *= (1 + (1 << 10))
+  key ^= (key >> 2);
+  // key *= (1 + (1 << 7)) * (1 + (1 << 12))
+  key += (key << 7);
+  key += (key << 12);
+  return key;
+}
+
+/*
+ * Inverse of jenkins_rev_mix32
+ *
+ * Note that jenkins_rev_unmix32 is significantly slower than
+ * jenkins_rev_mix32.
+ */
+
+inline uint32_t jenkins_rev_unmix32(uint32_t key) {
+  // These are the modular multiplicative inverses (in Z_2^32) of the
+  // multiplication factors in jenkins_rev_mix32, in reverse order.  They were
+  // computed using the Extended Euclidean algorithm, see
+  // http://en.wikipedia.org/wiki/Modular_multiplicative_inverse
+  key *= 2364026753U;
+
+  // The inverse of a ^= (a >> n) is
+  // b = a
+  // for (int i = n; i < 32; i += n) {
+  //   b ^= (a >> i);
+  // }
+  key ^=
+    (key >> 2) ^ (key >> 4) ^ (key >> 6) ^ (key >> 8) ^
+    (key >> 10) ^ (key >> 12) ^ (key >> 14) ^ (key >> 16) ^
+    (key >> 18) ^ (key >> 20) ^ (key >> 22) ^ (key >> 24) ^
+    (key >> 26) ^ (key >> 28) ^ (key >> 30);
+  key *= 3222273025U;
+  key ^= (key >> 9) ^ (key >> 18) ^ (key >> 27);
+  key *= 4042322161U;
+  key ^= (key >> 22);
+  key *= 16773121U;
+  return key;
+}
+
+/*
+ * Fowler / Noll / Vo (FNV) Hash
+ *     http://www.isthe.com/chongo/tech/comp/fnv/
+ */
+
+const uint32_t FNV_32_HASH_START = 2166136261UL;
+const uint64_t FNV_64_HASH_START = 14695981039346656037ULL;
+
+inline uint32_t fnv32(const char* s,
+                      uint32_t hash = FNV_32_HASH_START) {
+  for (; *s; ++s) {
+    hash += (hash << 1) + (hash << 4) + (hash << 7) +
+            (hash << 8) + (hash << 24);
+    hash ^= static_cast<signed char>(*s);
+  }
+  return hash;
+}
+
+inline uint32_t fnv32_buf(const void* buf,
+                          size_t n,
+                          uint32_t hash = FNV_32_HASH_START) {
+  const char* char_buf = reinterpret_cast<const char*>(buf);
+
+  for (size_t i = 0; i < n; ++i) {
+    hash += (hash << 1) + (hash << 4) + (hash << 7) +
+            (hash << 8) + (hash << 24);
+    hash ^= static_cast<signed char>(char_buf[i]);
+  }
+
+  return hash;
+}
+
+inline uint32_t fnv32(const std::string& str,
+                      uint32_t hash = FNV_32_HASH_START) {
+  return fnv32_buf(str.data(), str.size(), hash);
+}
+
+inline uint64_t fnv64(const char* s,
+                      uint64_t hash = FNV_64_HASH_START) {
+  for (; *s; ++s) {
+    hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
+      (hash << 8) + (hash << 40);
+    hash ^= static_cast<signed char>(*s);
+  }
+  return hash;
+}
+
+inline uint64_t fnv64_buf(const void* buf,
+                          size_t n,
+                          uint64_t hash = FNV_64_HASH_START) {
+  const signed char* char_buf = reinterpret_cast<const signed char*>(buf);
+
+  for (size_t i = 0; i < n; ++i) {
+    hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
+      (hash << 8) + (hash << 40);
+    hash ^= static_cast<signed char>(char_buf[i]);
+  }
+  return hash;
+}
+
+inline uint64_t fnv64(const std::string& str,
+                      uint64_t hash = FNV_64_HASH_START) {
+  return fnv64_buf(str.data(), str.size(), hash);
+}
+
+/*
+ * Paul Hsieh: http://www.azillionmonkeys.com/qed/hash.html
+ */
+
+#define get16bits(d) (*((const uint16_t*) (d)))
+
+inline uint32_t hsieh_hash32_buf(const void* buf, size_t len) {
+  const char* s = reinterpret_cast<const char*>(buf);
+  uint32_t hash = static_cast<uint32_t>(len);
+  uint32_t tmp;
+  size_t rem;
+
+  if (len <= 0 || buf == 0) {
+    return 0;
+  }
+
+  rem = len & 3;
+  len >>= 2;
+
+  /* Main loop */
+  for (;len > 0; len--) {
+    hash  += get16bits (s);
+    tmp    = (get16bits (s+2) << 11) ^ hash;
+    hash   = (hash << 16) ^ tmp;
+    s  += 2*sizeof (uint16_t);
+    hash  += hash >> 11;
+  }
+
+  /* Handle end cases */
+  switch (rem) {
+  case 3:
+    hash += get16bits(s);
+    hash ^= hash << 16;
+    hash ^= s[sizeof (uint16_t)] << 18;
+    hash += hash >> 11;
+    break;
+  case 2:
+    hash += get16bits(s);
+    hash ^= hash << 11;
+    hash += hash >> 17;
+    break;
+  case 1:
+    hash += *s;
+    hash ^= hash << 10;
+    hash += hash >> 1;
+  }
+
+  /* Force "avalanching" of final 127 bits */
+  hash ^= hash << 3;
+  hash += hash >> 5;
+  hash ^= hash << 4;
+  hash += hash >> 17;
+  hash ^= hash << 25;
+  hash += hash >> 6;
+
+  return hash;
+}
+
+#undef get16bits
+
+inline uint32_t hsieh_hash32(const char* s) {
+  return hsieh_hash32_buf(s, std::strlen(s));
+}
+
+inline uint32_t hsieh_hash32_str(const std::string& str) {
+  return hsieh_hash32_buf(str.data(), str.size());
+}
+
+//////////////////////////////////////////////////////////////////////
+
+} // namespace hash
+
+template<class Key, class Enable = void>
+struct hasher;
+
+struct Hash {
+  template <class T>
+  size_t operator()(const T& v) const {
+    return hasher<T>()(v);
+  }
+
+  template <class T, class... Ts>
+  size_t operator()(const T& t, const Ts&... ts) const {
+    return hash::hash_128_to_64((*this)(t), (*this)(ts...));
+  }
+};
+
+template<> struct hasher<int32_t> {
+  size_t operator()(int32_t key) const {
+    return hash::jenkins_rev_mix32(uint32_t(key));
+  }
+};
+
+template<> struct hasher<uint32_t> {
+  size_t operator()(uint32_t key) const {
+    return hash::jenkins_rev_mix32(key);
+  }
+};
+
+template<> struct hasher<int64_t> {
+  size_t operator()(int64_t key) const {
+    return hash::twang_mix64(uint64_t(key));
+  }
+};
+
+template<> struct hasher<uint64_t> {
+  size_t operator()(uint64_t key) const {
+    return hash::twang_mix64(key);
+  }
+};
+
+template <class T>
+struct hasher<T, typename std::enable_if<std::is_enum<T>::value, void>::type> {
+  size_t operator()(T key) const {
+    return Hash()(static_cast<typename std::underlying_type<T>::type>(key));
+  }
+};
+
+template <class T1, class T2>
+struct hasher<std::pair<T1, T2>> {
+  size_t operator()(const std::pair<T1, T2>& key) const {
+    return Hash()(key.first, key.second);
+  }
+};
+
+template <typename... Ts>
+struct hasher<std::tuple<Ts...>> {
+  size_t operator() (const std::tuple<Ts...>& key) const {
+    return applyTuple(Hash(), key);
+  }
+};
+
+// recursion
+template <size_t index, typename... Ts>
+struct TupleHasher {
+  size_t operator()(std::tuple<Ts...> const& key) const {
+    return hash::hash_combine(
+      TupleHasher<index - 1, Ts...>()(key),
+      std::get<index>(key));
+  }
+};
+
+// base
+template <typename... Ts>
+struct TupleHasher<0, Ts...> {
+  size_t operator()(std::tuple<Ts...> const& key) const {
+    // we could do std::hash here directly, but hash_combine hides all the
+    // ugly templating implicitly
+    return hash::hash_combine(std::get<0>(key));
+  }
+};
+
+} // namespace folly
+
+// Custom hash functions.
+namespace std {
+  // Hash function for pairs. Requires default hash functions for both
+  // items in the pair.
+  template <typename T1, typename T2>
+  struct hash<std::pair<T1, T2> > {
+  public:
+    size_t operator()(const std::pair<T1, T2>& x) const {
+      return folly::hash::hash_combine(x.first, x.second);
+    }
+  };
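+
+  // Illustrative consequence: pair-keyed containers now work with the
+  // default hasher, e.g. std::unordered_set<std::pair<int, int>>.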
+
+  // Hash function for tuples. Requires default hash functions for all types.
+  template <typename... Ts>
+  struct hash<std::tuple<Ts...>> {
+    size_t operator()(std::tuple<Ts...> const& key) const {
+      folly::TupleHasher<
+        std::tuple_size<std::tuple<Ts...>>::value - 1, // start index
+        Ts...> hasher;
+
+      return hasher(key);
+    }
+  };
+} // namespace std
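+
+// With the specializations above in scope, standard containers can key on
+// pairs and tuples directly.  A minimal sketch (the names are illustrative):
+//
+//   std::unordered_map<std::pair<int, int>, std::string> m;
+//   m[{1, 2}] = "corner";
+//   std::unordered_set<std::tuple<int, char, bool>> seen;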
+
+#endif
diff --git a/faux-folly/folly/IndexedMemPool.h b/faux-folly/folly/IndexedMemPool.h
new file mode 100644
index 0000000..23b42ac
--- /dev/null
+++ b/faux-folly/folly/IndexedMemPool.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_INDEXEDMEMPOOL_H
+#define FOLLY_INDEXEDMEMPOOL_H
+
+#include <algorithm>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <stdint.h>
+#include <assert.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <boost/noncopyable.hpp>
+#include <folly/AtomicStruct.h>
+#include <folly/detail/CacheLocality.h>
+
+// Ignore shadowing warnings within this file, so includers can use -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+
+namespace folly {
+
+namespace detail {
+template <typename Pool>
+struct IndexedMemPoolRecycler;
+} // namespace detail
+
+/// Instances of IndexedMemPool dynamically allocate and then pool their
+/// element type (T), returning 4-byte integer indices that can be passed
+/// to the pool's operator[] method to access or obtain pointers to the
+/// actual elements.  The memory backing items returned from the pool
+/// will always be readable, even if items have been returned to the pool.
+/// These two features are useful for lock-free algorithms.  The indexing
+/// behavior makes it easy to build tagged pointer-like-things, since
+/// a large number of elements can be managed using fewer bits than a
+/// full pointer.  The access-after-free behavior makes it safe to read
+/// from T-s even after they have been recycled, since it is guaranteed
+/// that the memory won't have been returned to the OS and unmapped
+/// (the algorithm must still use a mechanism to validate that the read
+/// was correct, but it doesn't have to worry about page faults), and if
+/// the elements use internal sequence numbers it can be guaranteed that
+/// there won't be an ABA match due to the element being overwritten with
+/// a different type that has the same bit pattern.
+///
+/// IndexedMemPool has two object lifecycle strategies.  The first
+/// is to construct objects when they are allocated from the pool and
+/// destroy them when they are recycled.  In this mode allocIndex and
+/// allocElem have emplace-like semantics.  In the second mode, objects
+/// are default-constructed the first time they are removed from the pool,
+/// and deleted when the pool itself is deleted.  By default the first
+/// mode is used for non-trivial T, and the second is used for trivial T.
+///
+/// IMPORTANT: Space for extra elements is allocated to account for those
+/// that are inaccessible because they are in other local lists, so the
+/// actual number of items that can be allocated ranges from capacity to
+/// capacity + (NumLocalLists_-1)*LocalListLimit_.  This is important if
+/// you are trying to maximize the capacity of the pool while constraining
+/// the bit size of the resulting pointers, because the pointers will
+/// actually range up to the boosted capacity.  See maxIndexForCapacity
+/// and capacityForMaxIndex.
+///
+/// To avoid contention, NumLocalLists_ free lists of limited (less than
+/// or equal to LocalListLimit_) size are maintained, and each thread
+/// retrieves and returns entries from its associated local list.  If the
+/// local list becomes too large then elements are placed in bulk in a
+/// global free list.  This allows items to be efficiently recirculated
+/// from consumers to producers.  AccessSpreader is used to access the
+/// local lists, so there is no performance advantage to having more
+/// local lists than L1 caches.
+///
+/// The pool mmap-s the entire necessary address space when the pool is
+/// constructed, but delays element construction.  This means that only
+/// elements that are actually returned to the caller get paged into the
+/// process's resident set (RSS).
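+///
+/// A minimal usage sketch (Widget and the capacity are illustrative):
+///
+///   folly::IndexedMemPool<Widget> pool(1000);
+///   uint32_t idx = pool.allocIndex();  // 0 means no element was available
+///   if (idx != 0) {
+///     Widget& w = pool[idx];
+///     // ... use w, possibly publishing idx to other threads ...
+///     pool.recycleIndex(idx);          // return the slot for reuse
+///   }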
+template <typename T,
+          int NumLocalLists_ = 32,
+          int LocalListLimit_ = 200,
+          template<typename> class Atom = std::atomic,
+          bool EagerRecycleWhenTrivial = false,
+          bool EagerRecycleWhenNotTrivial = true>
+struct IndexedMemPool : boost::noncopyable {
+  typedef T value_type;
+
+  typedef std::unique_ptr<T, detail::IndexedMemPoolRecycler<IndexedMemPool>>
+      UniquePtr;
+
+  static_assert(LocalListLimit_ <= 255, "LocalListLimit must fit in 8 bits");
+  enum {
+    NumLocalLists = NumLocalLists_,
+    LocalListLimit = LocalListLimit_
+  };
+
+
+  static constexpr bool eagerRecycle() {
+    return std::is_trivial<T>::value
+        ? EagerRecycleWhenTrivial : EagerRecycleWhenNotTrivial;
+  }
+
+  // these are public because clients may need to reason about the number
+  // of bits required to hold indices from a pool, given its capacity
+
+  static constexpr uint32_t maxIndexForCapacity(uint32_t capacity) {
+    // index of uint32_t(-1) == UINT32_MAX is reserved for isAllocated tracking
+    return std::min(uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
+                    uint64_t(uint32_t(-1) - 1));
+  }
+
+  static constexpr uint32_t capacityForMaxIndex(uint32_t maxIndex) {
+    return maxIndex - (NumLocalLists - 1) * LocalListLimit;
+  }
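+
+  // For example, with the default NumLocalLists_ = 32 and LocalListLimit_
+  // = 200, maxIndexForCapacity(1000) == 1000 + 31 * 200 == 7200, so the
+  // indices of a capacity-1000 pool need 13 bits rather than 10.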
+
+
+  /// Constructs a pool that can allocate at least _capacity_ elements,
+  /// even if all the local lists are full
+  explicit IndexedMemPool(uint32_t capacity)
+    : actualCapacity_(maxIndexForCapacity(capacity))
+    , size_(0)
+    , globalHead_(TaggedPtr{})
+  {
+    const size_t needed = sizeof(Slot) * (actualCapacity_ + 1);
+    long pagesize = sysconf(_SC_PAGESIZE);
+    mmapLength_ = ((needed - 1) & ~(pagesize - 1)) + pagesize;
+    assert(needed <= mmapLength_ && mmapLength_ < needed + pagesize);
+    assert((mmapLength_ % pagesize) == 0);
+
+    slots_ = static_cast<Slot*>(mmap(nullptr, mmapLength_,
+                                     PROT_READ | PROT_WRITE,
+                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+    if (slots_ == MAP_FAILED) {
+      assert(errno == ENOMEM);
+      throw std::bad_alloc();
+    }
+  }
+
+  /// Destroys all of the contained elements
+  ~IndexedMemPool() {
+    if (!eagerRecycle()) {
+      for (size_t i = size_; i > 0; --i) {
+        slots_[i].~Slot();
+      }
+    }
+    munmap(slots_, mmapLength_);
+  }
+
+  /// Returns a lower bound on the number of elements that may be
+  /// simultaneously allocated and not yet recycled.  Because of the
+  /// local lists it is possible that more elements than this are returned
+  /// successfully
+  size_t capacity() {
+    return capacityForMaxIndex(actualCapacity_);
+  }
+
+  /// Finds a slot with a non-zero index, emplaces a T there if we're
+  /// using the eager recycle lifecycle mode, and returns the index,
+  /// or returns 0 if no elements are available.
+  template <typename ...Args>
+  uint32_t allocIndex(Args&&... args) {
+    static_assert(sizeof...(Args) == 0 || eagerRecycle(),
+        "emplace-style allocation requires eager recycle, "
+        "which is defaulted only for non-trivial types");
+    auto idx = localPop(localHead());
+    if (idx != 0 && eagerRecycle()) {
+      T* ptr = &slot(idx).elem;
+      new (ptr) T(std::forward<Args>(args)...);
+    }
+    return idx;
+  }
+
+  /// If an element is available, returns a std::unique_ptr to it that will
+  /// recycle the element to the pool when it is reclaimed, otherwise returns
+  /// a null (falsy) std::unique_ptr
+  template <typename ...Args>
+  UniquePtr allocElem(Args&&... args) {
+    auto idx = allocIndex(std::forward<Args>(args)...);
+    T* ptr = idx == 0 ? nullptr : &slot(idx).elem;
+    return UniquePtr(ptr, typename UniquePtr::deleter_type(this));
+  }
+
+  /// Gives up ownership previously granted by allocIndex()
+  void recycleIndex(uint32_t idx) {
+    assert(isAllocated(idx));
+    if (eagerRecycle()) {
+      slot(idx).elem.~T();
+    }
+    localPush(localHead(), idx);
+  }
+
+  /// Provides access to the pooled element referenced by idx
+  T& operator[](uint32_t idx) {
+    return slot(idx).elem;
+  }
+
+  /// Provides access to the pooled element referenced by idx
+  const T& operator[](uint32_t idx) const {
+    return slot(idx).elem;
+  }
+
+  /// If elem == &pool[idx], then pool.locateElem(elem) == idx.  Also,
+  /// pool.locateElem(nullptr) == 0
+  uint32_t locateElem(const T* elem) const {
+    if (!elem) {
+      return 0;
+    }
+
+    static_assert(std::is_standard_layout<Slot>::value,
+                  "offsetof needs a standard-layout type");
+
+    auto slot = reinterpret_cast<const Slot*>(
+        reinterpret_cast<const char*>(elem) - offsetof(Slot, elem));
+    auto rv = slot - slots_;
+
+    // this assert also tests that rv is in range
+    assert(elem == &(*this)[rv]);
+    return rv;
+  }
+
+  /// Returns true iff idx has been allocIndex()ed and not recycleIndex()ed
+  bool isAllocated(uint32_t idx) const {
+    return slot(idx).localNext == uint32_t(-1);
+  }
+
+
+ private:
+  ///////////// types
+
+  struct Slot {
+    T elem;
+    uint32_t localNext;
+    uint32_t globalNext;
+
+    Slot() : localNext{}, globalNext{} {}
+  };
+
+  struct TaggedPtr {
+    uint32_t idx;
+
+    // size is bottom 8 bits, tag in top 24.  g++'s code generation for
+    // bitfields seems to depend on the phase of the moon, plus we can
+    // do better because we can rely on other checks to avoid masking
+    uint32_t tagAndSize;
+
+    enum : uint32_t {
+        SizeBits = 8,
+        SizeMask = (1U << SizeBits) - 1,
+        TagIncr = 1U << SizeBits,
+    };
+
+    uint32_t size() const {
+      return tagAndSize & SizeMask;
+    }
+
+    TaggedPtr withSize(uint32_t repl) const {
+      assert(repl <= LocalListLimit);
+      return TaggedPtr{ idx, (tagAndSize & ~SizeMask) | repl };
+    }
+
+    TaggedPtr withSizeIncr() const {
+      assert(size() < LocalListLimit);
+      return TaggedPtr{ idx, tagAndSize + 1 };
+    }
+
+    TaggedPtr withSizeDecr() const {
+      assert(size() > 0);
+      return TaggedPtr{ idx, tagAndSize - 1 };
+    }
+
+    TaggedPtr withIdx(uint32_t repl) const {
+      return TaggedPtr{ repl, tagAndSize + TagIncr };
+    }
+
+    TaggedPtr withEmpty() const {
+      return withIdx(0).withSize(0);
+    }
+  };
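+
+  // For example, tagAndSize == 0x0305 means size() == 5 with tag 3;
+  // withSizeIncr() yields 0x0306, and withIdx() adds TagIncr so the tag
+  // advances on every idx replacement, which is what defeats ABA reuse.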
+
+  struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING LocalList {
+    AtomicStruct<TaggedPtr,Atom> head;
+
+    LocalList() : head(TaggedPtr{}) {}
+  };
+
+  ////////// fields
+
+  /// the actual number of slots that we will allocate, to guarantee
+  /// that we will satisfy the capacity requested at construction time.
+  /// They will be numbered 1..actualCapacity_ (note the 1-based counting),
+  /// and occupy slots_[1..actualCapacity_].
+  size_t actualCapacity_;
+
+  /// the number of bytes allocated from mmap, which is a multiple of
+  /// the page size of the machine
+  size_t mmapLength_;
+
+  /// this records the number of slots that have actually been constructed.
+  /// To allow use of atomic ++ instead of CAS, we let this overflow.
+  /// The actual number of constructed elements is min(actualCapacity_,
+  /// size_)
+  Atom<uint32_t> size_;
+
+  /// raw storage, only 1..min(size_,actualCapacity_) (inclusive) are
+  /// actually constructed.  Note that slots_[0] is not constructed or used
+  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Slot* slots_;
+
+  /// use AccessSpreader to find your list.  We use stripes instead of
+  /// thread-local to avoid the need to grow or shrink on thread start
+  /// or join.   These are heads of lists chained with localNext
+  LocalList local_[NumLocalLists];
+
+  /// this is the head of a list of nodes chained by globalNext; each node
+  /// is itself the head of a list chained by localNext
+  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING AtomicStruct<TaggedPtr,Atom> globalHead_;
+
+  ///////////// private methods
+
+  size_t slotIndex(uint32_t idx) const {
+    assert(0 < idx &&
+           idx <= actualCapacity_ &&
+           idx <= size_.load(std::memory_order_acquire));
+    return idx;
+  }
+
+  Slot& slot(uint32_t idx) {
+    return slots_[slotIndex(idx)];
+  }
+
+  const Slot& slot(uint32_t idx) const {
+    return slots_[slotIndex(idx)];
+  }
+
+  // localHead references a full list chained by localNext.  s should
+  // reference slot(localHead); it is passed as a micro-optimization
+  void globalPush(Slot& s, uint32_t localHead) {
+    while (true) {
+      TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
+      s.globalNext = gh.idx;
+      if (globalHead_.compare_exchange_strong(gh, gh.withIdx(localHead))) {
+        // success
+        return;
+      }
+    }
+  }
+
+  // idx references a single node
+  void localPush(AtomicStruct<TaggedPtr,Atom>& head, uint32_t idx) {
+    Slot& s = slot(idx);
+    TaggedPtr h = head.load(std::memory_order_acquire);
+    while (true) {
+      s.localNext = h.idx;
+
+      if (h.size() == LocalListLimit) {
+        // push will overflow local list, steal it instead
+        if (head.compare_exchange_strong(h, h.withEmpty())) {
+          // steal was successful, put everything in the global list
+          globalPush(s, idx);
+          return;
+        }
+      } else {
+        // local list has space
+        if (head.compare_exchange_strong(h, h.withIdx(idx).withSizeIncr())) {
+          // success
+          return;
+        }
+      }
+      // h was updated by failing CAS
+    }
+  }
+
+  // returns 0 if empty
+  uint32_t globalPop() {
+    while (true) {
+      TaggedPtr gh = globalHead_.load(std::memory_order_acquire);
+      if (gh.idx == 0 || globalHead_.compare_exchange_strong(
+                  gh, gh.withIdx(slot(gh.idx).globalNext))) {
+        // global list is empty, or pop was successful
+        return gh.idx;
+      }
+    }
+  }
+
+  // returns 0 if allocation failed
+  uint32_t localPop(AtomicStruct<TaggedPtr,Atom>& head) {
+    while (true) {
+      TaggedPtr h = head.load(std::memory_order_acquire);
+      if (h.idx != 0) {
+        // local list is non-empty, try to pop
+        Slot& s = slot(h.idx);
+        if (head.compare_exchange_strong(
+                    h, h.withIdx(s.localNext).withSizeDecr())) {
+          // success
+          s.localNext = uint32_t(-1);
+          return h.idx;
+        }
+        continue;
+      }
+
+      uint32_t idx = globalPop();
+      if (idx == 0) {
+        // global list is empty, allocate and construct new slot
+        if (size_.load(std::memory_order_relaxed) >= actualCapacity_ ||
+            (idx = ++size_) > actualCapacity_) {
+          // allocation failed
+          return 0;
+        }
+        // default-construct it now if we aren't going to construct and
+        // destroy on each allocation
+        if (!eagerRecycle()) {
+          T* ptr = &slot(idx).elem;
+          new (ptr) T();
+        }
+        slot(idx).localNext = uint32_t(-1);
+        return idx;
+      }
+
+      Slot& s = slot(idx);
+      if (head.compare_exchange_strong(
+                  h, h.withIdx(s.localNext).withSize(LocalListLimit))) {
+        // global list moved to local list, keep head for us
+        s.localNext = uint32_t(-1);
+        return idx;
+      }
+      // local bulk push failed, return idx to the global list and try again
+      globalPush(s, idx);
+    }
+  }
+
+  AtomicStruct<TaggedPtr,Atom>& localHead() {
+    auto stripe = detail::AccessSpreader<Atom>::current(NumLocalLists);
+    return local_[stripe].head;
+  }
+};
+
+namespace detail {
+
+/// This is a stateful Deleter functor, which allows std::unique_ptr
+/// to track elements allocated from an IndexedMemPool by tracking the
+/// associated pool.  See IndexedMemPool::allocElem.
+template <typename Pool>
+struct IndexedMemPoolRecycler {
+  Pool* pool;
+
+  explicit IndexedMemPoolRecycler(Pool* pool) : pool(pool) {}
+
+  IndexedMemPoolRecycler(const IndexedMemPoolRecycler<Pool>& rhs)
+      = default;
+  IndexedMemPoolRecycler& operator= (const IndexedMemPoolRecycler<Pool>& rhs)
+      = default;
+
+  void operator()(typename Pool::value_type* elem) const {
+    pool->recycleIndex(pool->locateElem(elem));
+  }
+};
+
+} // namespace detail
+
+} // namespace folly
+
+#pragma GCC diagnostic pop
+#endif
diff --git a/faux-folly/folly/Lazy.h b/faux-folly/folly/Lazy.h
new file mode 100644
index 0000000..bca89d9
--- /dev/null
+++ b/faux-folly/folly/Lazy.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FOLLY_LAZY_H_
+#define FOLLY_LAZY_H_
+
+#include <utility>
+#include <type_traits>
+
+#include <folly/Optional.h>
+
+namespace folly {
+
+//////////////////////////////////////////////////////////////////////
+
+/*
+ * Lazy -- for delayed initialization of a value.  The value's
+ * initialization will be computed on demand at its first use, but
+ * will not be recomputed if its value is requested again.  The value
+ * may still be mutated after its initialization if the lazy is not
+ * declared const.
+ *
+ * The value is created using folly::lazy, usually with a lambda, and
+ * its value is requested using operator().
+ *
+ * Note that the value is not safe for concurrent accesses by multiple
+ * threads, even if you declare it const.  See note below.
+ *
+ *
+ * Example Usage:
+ *
+ *   void foo() {
+ *     auto const val = folly::lazy([&]{
+ *       return something_expensive(blah());
+ *     });
+ *
+ *     if (condition1) {
+ *       use(val());
+ *     }
+ *     if (condition2) {
+ *       useMaybeAgain(val());
+ *     } else {
+ *       // Unneeded in this branch.
+ *     }
+ *   }
+ *
+ *
+ * Rationale:
+ *
+ *    - operator() is used to request the value instead of an implicit
+ *      conversion because the slight syntactic overhead in common use
+ *      seems worth the increased clarity.
+ *
+ *    - Lazy values do not model CopyConstructible because it is
+ *      unclear what semantics would be desirable.  Either copies
+ *      should share the cached value (adding overhead to cases that
+ *      don't need to support copies), or they could recompute the
+ *      value unnecessarily.  Sharing with mutable lazies would also
+ *      leave them with non-value semantics despite looking
+ *      value-like.
+ *
+ *    - Not thread safe for const accesses.  Many use cases for lazy
+ *      values are local variables on the stack, where multiple
+ *      threads shouldn't even be able to reach the value.  It still
+ *      is useful to indicate/check that the value doesn't change with
+ *      const, particularly when it is captured by a large family of
+ *      lambdas.  Adding internal synchronization seems like it would
+ *      pessimize the most common use case in favor of less likely use
+ *      cases.
+ *
+ */
+
+//////////////////////////////////////////////////////////////////////
+
+namespace detail {
+
+template<class Func>
+struct Lazy {
+  typedef typename std::result_of<Func()>::type result_type;
+
+  explicit Lazy(Func&& f) : func_(std::move(f)) {}
+  explicit Lazy(Func& f)  : func_(f) {}
+
+  Lazy(Lazy&& o)
+    : value_(std::move(o.value_))
+    , func_(std::move(o.func_))
+  {}
+
+  Lazy(const Lazy&) = delete;
+  Lazy& operator=(const Lazy&) = delete;
+  Lazy& operator=(Lazy&&) = delete;
+
+  const result_type& operator()() const {
+    return const_cast<Lazy&>(*this)();
+  }
+
+  result_type& operator()() {
+    if (!value_) value_ = func_();
+    return *value_;
+  }
+
+private:
+  Optional<result_type> value_;
+  Func func_;
+};
+
+} // namespace detail
+
+//////////////////////////////////////////////////////////////////////
+
+template<class Func>
+detail::Lazy<typename std::remove_reference<Func>::type>
+lazy(Func&& fun) {
+  return detail::Lazy<typename std::remove_reference<Func>::type>(
+    std::forward<Func>(fun)
+  );
+}
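+
+// A sketch of the compute-once behavior (the counter is illustrative):
+//
+//   int calls = 0;
+//   auto v = folly::lazy([&] { ++calls; return 42; });
+//   v(); v();   // calls == 1; the result is cached after the first use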
+
+//////////////////////////////////////////////////////////////////////
+
+} // namespace folly
+
+#endif
diff --git a/faux-folly/folly/LifoSem.cpp b/faux-folly/folly/LifoSem.cpp
new file mode 100644
index 0000000..eab911d
--- /dev/null
+++ b/faux-folly/folly/LifoSem.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/LifoSem.h>
+
+/// Raw node storage is preallocated in a contiguous memory segment,
+/// but we use an anonymous mmap so the physical memory used (RSS) will
+/// only reflect the maximum number of waiters that actually existed
+/// concurrently.  For blocked threads the max node count is limited by the
+/// number of threads, so we can conservatively estimate that this will be
+/// < 10k.  For LifoEventSem, however, we could potentially have many more.
+///
+/// On a 64-bit architecture each LifoSemRawNode takes 16 bytes.  We make
+/// the pool 1 million entries.
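+/// At 16 bytes per node that is roughly 16 MB of address space, of which
+/// only the pages actually touched by live waiters ever become resident.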
+
+LIFOSEM_DECLARE_POOL(std::atomic, 1000000)
+
+namespace folly {
+
+ShutdownSemError::ShutdownSemError(const std::string& msg)
+  : std::runtime_error(msg)
+{}
+
+ShutdownSemError::~ShutdownSemError() noexcept {
+}
+
+} // namespace folly
diff --git a/faux-folly/folly/LifoSem.h b/faux-folly/folly/LifoSem.h
new file mode 100644
index 0000000..24b37c8
--- /dev/null
+++ b/faux-folly/folly/LifoSem.h
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_LIFOSEM_H
+#define FOLLY_LIFOSEM_H
+
+#include <string.h>
+#include <stdint.h>
+#include <atomic>
+#include <algorithm>
+#include <memory>
+#include <system_error>
+
+#include <boost/noncopyable.hpp>
+
+#include <folly/AtomicStruct.h>
+#include <folly/Baton.h>
+#include <folly/IndexedMemPool.h>
+#include <folly/Likely.h>
+#include <folly/detail/CacheLocality.h>
+
+namespace folly {
+
+template <template<typename> class Atom = std::atomic,
+          class BatonType = Baton<Atom>>
+struct LifoSemImpl;
+
+/// LifoSem is a semaphore that wakes its waiters in a manner intended to
+/// maximize performance rather than fairness.  It should be preferred
+/// to a mutex+condvar or POSIX sem_t solution when all of the waiters
+/// are equivalent.  It is faster than a condvar or sem_t, and it has a
+/// shutdown state that might save you a lot of complexity when it comes
+/// time to shut down your work pipelines.  LifoSem is larger than sem_t,
+/// but that is only because it uses padding and alignment to avoid
+/// false sharing.
+///
+/// LifoSem allows multi-post and multi-tryWait, and provides a shutdown
+/// state that awakens all waiters.  LifoSem is faster than sem_t because
+/// it performs exact wakeups, so it often requires fewer system calls.
+/// It provides all of the functionality of sem_t except for timed waiting.
+/// It is called LifoSem because its wakeup policy is approximately LIFO,
+/// rather than the usual FIFO.
+///
+/// The core semaphore operations provided are:
+///
+/// -- post() -- if there is a pending waiter, wake it up, otherwise
+/// increment the value of the semaphore.  If the value of the semaphore
+/// is already 2^32-1, does nothing.  Compare to sem_post().
+///
+/// -- post(n) -- equivalent to n calls to post(), but much more efficient.
+/// sem_t has no equivalent to this method.
+///
+/// -- bool tryWait() -- if the semaphore's value is positive, decrements it
+/// and returns true, otherwise returns false.  Compare to sem_trywait().
+///
+/// -- uint32_t tryWait(uint32_t n) -- attempts to decrement the semaphore's
+/// value by n, returning the amount by which it actually was decremented
+/// (a value from 0 to n inclusive).  Not atomic.  Equivalent to n calls
+/// to tryWait().  sem_t has no equivalent to this method.
+///
+/// -- wait() -- waits until tryWait() can succeed.  Compare to sem_wait().
+///
+/// LifoSem also has the notion of a shutdown state, in which any calls
+/// that would block (or are already blocked) throw ShutdownSemError.
+/// Note the difference between a call to wait() and a call to wait()
+/// that might block.  In the former case tryWait() would succeed, and no
+/// isShutdown() check is performed.  In the latter case an exception is
+/// thrown.  This behavior allows a LifoSem controlling work distribution
+/// to drain.  If you want to immediately stop all waiting on shutdown,
+/// you can just check isShutdown() yourself (preferably wrapped in
+/// an UNLIKELY).  This fast-stop behavior is easy to add, but difficult
+/// to remove if you want the draining behavior, which is why we have
+/// chosen the former.  Since wait() is the only method that can block,
+/// it is the only one that is affected by the shutdown state.
+///
+/// All LifoSem operations except valueGuess() are guaranteed
+/// to be linearizable.
+typedef LifoSemImpl<> LifoSem;
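+
+// A minimal producer/consumer sketch (the queue is illustrative):
+//
+//   folly::LifoSem sem;
+//   // producer:  queue.enqueue(item); sem.post();
+//   // consumer:  sem.wait(); item = queue.dequeue();
+//   // teardown:  sem.shutdown();  // blocked wait()s throw ShutdownSemError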
+
+
+/// The exception thrown when wait()ing on an isShutdown() LifoSem
+struct ShutdownSemError : public std::runtime_error {
+  explicit ShutdownSemError(const std::string& msg);
+  virtual ~ShutdownSemError() noexcept;
+};
+
+namespace detail {
+
+// Internally, a LifoSem is either a value or a linked list of wait nodes.
+// This union is captured in the LifoSemHead type, which holds either a
+// value or an indexed pointer to the list.  LifoSemHead itself is a value
+// type; the head is a mutable atomic box containing a LifoSemHead value.
+// Each wait node corresponds to exactly one waiter.  Values can flow
+// through the semaphore either by going into and out of the head's value,
+// or by direct communication from a poster to a waiter.  The former path
+// is taken when there are no pending waiters, the latter otherwise.  The
+// general flow of a post is to try to increment the value or pop-and-post
+// a wait node.  Either of those has the effect of conveying one semaphore
+// unit.  Waiting is the opposite, either a decrement of the value or
+// push-and-wait of a wait node.  The generic LifoSemBase abstracts the
+// actual mechanism by which a wait node's post->wait communication is
+// performed, which is why we have LifoSemRawNode and LifoSemNode.
+
+/// LifoSemRawNode is the actual pooled storage that backs LifoSemNode
+/// for user-specified Handoff types.  This is done so that we can have
+/// a large static IndexedMemPool of nodes, instead of per-type pools
+template <template<typename> class Atom>
+struct LifoSemRawNode {
+  std::aligned_storage<sizeof(void*),alignof(void*)>::type raw;
+
+  /// The IndexedMemPool index of the next node in this chain, or 0
+  /// if none.  This will be set to uint32_t(-1) if the node is being
+  /// posted due to a shutdown-induced wakeup
+  uint32_t next;
+
+  bool isShutdownNotice() const { return next == uint32_t(-1); }
+  void clearShutdownNotice() { next = 0; }
+  void setShutdownNotice() { next = uint32_t(-1); }
+
+  typedef folly::IndexedMemPool<LifoSemRawNode<Atom>,32,200,Atom> Pool;
+
+  /// Storage for all of the waiter nodes for LifoSem-s that use Atom
+  static Pool pool;
+};
+
+/// Use this macro to declare the static storage that backs the raw nodes
+/// for the specified atomic type
+#define LIFOSEM_DECLARE_POOL(Atom, capacity)                       \
+    template<>                                                     \
+    folly::detail::LifoSemRawNode<Atom>::Pool                      \
+        folly::detail::LifoSemRawNode<Atom>::pool((capacity));
+
+/// Handoff is a type not bigger than a void* that knows how to perform a
+/// single post() -> wait() communication.  It must have a post() method.
+/// If it has a wait() method then LifoSemBase's wait() implementation
+/// will work out of the box, otherwise you will need to specialize
+/// LifoSemBase::wait accordingly.
+template <typename Handoff, template<typename> class Atom>
+struct LifoSemNode : public LifoSemRawNode<Atom> {
+
+  static_assert(sizeof(Handoff) <= sizeof(LifoSemRawNode<Atom>::raw),
+      "Handoff too big for small-object optimization, use indirection");
+  static_assert(alignof(Handoff) <=
+                alignof(decltype(LifoSemRawNode<Atom>::raw)),
+      "Handoff alignment constraint not satisfied");
+
+  template <typename ...Args>
+  void init(Args&&... args) {
+    new (&this->raw) Handoff(std::forward<Args>(args)...);
+  }
+
+  void destroy() {
+    handoff().~Handoff();
+#ifndef NDEBUG
+    memset(&this->raw, 'F', sizeof(this->raw));
+#endif
+  }
+
+  Handoff& handoff() {
+    return *static_cast<Handoff*>(static_cast<void*>(&this->raw));
+  }
+
+  const Handoff& handoff() const {
+    return *static_cast<const Handoff*>(static_cast<const void*>(&this->raw));
+  }
+};
+
+template <typename Handoff, template<typename> class Atom>
+struct LifoSemNodeRecycler {
+  void operator()(LifoSemNode<Handoff,Atom>* elem) const {
+    elem->destroy();
+    auto idx = LifoSemRawNode<Atom>::pool.locateElem(elem);
+    LifoSemRawNode<Atom>::pool.recycleIndex(idx);
+  }
+};
+
+/// LifoSemHead is a 64-bit struct that holds a 32-bit value, some state
+/// bits, and a sequence number used to avoid ABA problems in the lock-free
+/// management of the LifoSem's wait lists.  The value can either hold
+/// an integral semaphore value (if there are no waiters) or a node index
+/// (see IndexedMemPool) for the head of a list of wait nodes
+class LifoSemHead {
+  // What we really want are bitfields:
+  //  uint64_t data : 32; uint64_t isNodeIdx : 1; uint64_t seq : 31;
+  // Unfortunately g++ generates pretty bad code for this sometimes (I saw
+  // -O3 code from gcc 4.7.1 copying the bitfields one at a time instead of
+  // in bulk, for example).  We can generate better code anyway by assuming
+  // that setters won't be given values that cause under/overflow, and
+  // putting the sequence at the end where its planned overflow doesn't
+  // need any masking.
+  //
+  // data == 0 (empty list) with isNodeIdx is conceptually the same
+  // as data == 0 (no unclaimed increments) with !isNodeIdx, we always
+  // convert the former into the latter to make the logic simpler.
+  enum {
+    IsNodeIdxShift = 32,
+    IsShutdownShift = 33,
+    SeqShift = 34,
+  };
+  enum : uint64_t {
+    IsNodeIdxMask = uint64_t(1) << IsNodeIdxShift,
+    IsShutdownMask = uint64_t(1) << IsShutdownShift,
+    SeqIncr = uint64_t(1) << SeqShift,
+    SeqMask = ~(SeqIncr - 1),
+  };
+
+ public:
+
+  uint64_t bits;
+
+  //////// getters
+
+  inline uint32_t idx() const {
+    assert(isNodeIdx());
+    assert(uint32_t(bits) != 0);
+    return uint32_t(bits);
+  }
+  inline uint32_t value() const {
+    assert(!isNodeIdx());
+    return uint32_t(bits);
+  }
+  inline constexpr bool isNodeIdx() const {
+    return (bits & IsNodeIdxMask) != 0;
+  }
+  inline constexpr bool isShutdown() const {
+    return (bits & IsShutdownMask) != 0;
+  }
+  inline constexpr uint32_t seq() const {
+    return uint32_t(bits >> SeqShift);
+  }
+
+  //////// setter-like things return a new struct
+
+  /// This should only be used for initial construction, not for setting
+  /// the value, because it clears the sequence number
+  static inline constexpr LifoSemHead fresh(uint32_t value) {
+    return LifoSemHead{ value };
+  }
+
+  /// Returns the LifoSemHead that results from popping a waiter node,
+  /// given the current waiter node's next ptr
+  inline LifoSemHead withPop(uint32_t idxNext) const {
+    assert(isNodeIdx());
+    if (idxNext == 0) {
+      // no isNodeIdx bit or data bits.  Wraparound of seq bits is okay
+      return LifoSemHead{ (bits & (SeqMask | IsShutdownMask)) + SeqIncr };
+    } else {
+      // preserve sequence bits (incremented with wraparound okay) and
+      // isNodeIdx bit, replace all data bits
+      return LifoSemHead{
+          (bits & (SeqMask | IsShutdownMask | IsNodeIdxMask)) +
+          SeqIncr + idxNext };
+    }
+  }
+
+  /// Returns the LifoSemHead that results from pushing a new waiter node
+  inline LifoSemHead withPush(uint32_t _idx) const {
+    assert(isNodeIdx() || value() == 0);
+    assert(!isShutdown());
+    assert(_idx != 0);
+    return LifoSemHead{ (bits & SeqMask) | IsNodeIdxMask | _idx };
+  }
+
+  /// Returns the LifoSemHead with value increased by delta, with
+  /// saturation if the maximum value is reached
+  inline LifoSemHead withValueIncr(uint32_t delta) const {
+    assert(!isNodeIdx());
+    auto rv = LifoSemHead{ bits + SeqIncr + delta };
+    if (UNLIKELY(rv.isNodeIdx())) {
+      // value has overflowed into the isNodeIdx bit
+      rv = LifoSemHead{ (rv.bits & ~IsNodeIdxMask) | (IsNodeIdxMask - 1) };
+    }
+    return rv;
+  }
+
+  /// Returns the LifoSemHead that results from decrementing the value
+  inline LifoSemHead withValueDecr(uint32_t delta) const {
+    assert(delta > 0 && delta <= value());
+    return LifoSemHead{ bits + SeqIncr - delta };
+  }
+
+  /// Returns the LifoSemHead with the same state as the current node,
+  /// but with the shutdown bit set
+  inline LifoSemHead withShutdown() const {
+    return LifoSemHead{ bits | IsShutdownMask };
+  }
+
+  inline constexpr bool operator== (const LifoSemHead& rhs) const {
+    return bits == rhs.bits;
+  }
+  inline constexpr bool operator!= (const LifoSemHead& rhs) const {
+    return !(*this == rhs);
+  }
+};
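+
+// For example, LifoSemHead::fresh(3) has value() == 3 and !isNodeIdx().
+// On an empty head (value() == 0), withPush(5) sets the IsNodeIdx bit
+// with idx == 5; withPop, withValueIncr, and withValueDecr each add
+// SeqIncr, so a stale copy of the head never wins a compare-and-swap.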
+
+/// LifoSemBase is the engine for several different types of LIFO
+/// semaphore.  LifoSemBase handles storage of positive semaphore values
+/// and wait nodes, but the actual waiting and notification mechanism is
+/// up to the client.
+///
+/// The Handoff type is responsible for arranging one wakeup notification.
+/// See LifoSemNode for more information on how to make your own.
+template <typename Handoff,
+          template<typename> class Atom = std::atomic>
+struct LifoSemBase : boost::noncopyable {
+
+  /// Constructor
+  explicit LifoSemBase(uint32_t initialValue = 0)
+    : head_(LifoSemHead::fresh(initialValue)) {}
+
+  /// Silently saturates if value is already 2^32-1
+  void post() {
+    auto idx = incrOrPop(1);
+    if (idx != 0) {
+      idxToNode(idx).handoff().post();
+    }
+  }
+
+  /// Equivalent to n calls to post(), except may be much more efficient.
+  /// At any point in time at which the semaphore's value would exceed
+  /// 2^32-1 if tracked with infinite precision, it may be silently
+  /// truncated to 2^32-1.  This saturation is not guaranteed to be exact,
+  /// although it is guaranteed that overflow won't result in wrap-around.
+  /// There would be a substantial performance and complexity cost in
+  /// guaranteeing exact saturation (similar to the cost of maintaining
+  /// linearizability near the zero value, but without as much of
+  /// a benefit).
+  void post(uint32_t n) {
+    uint32_t idx;
+    while (n > 0 && (idx = incrOrPop(n)) != 0) {
+      // pop accounts for only 1
+      idxToNode(idx).handoff().post();
+      --n;
+    }
+  }
+
+  /// Returns true iff shutdown() has been called
+  bool isShutdown() const {
+    return UNLIKELY(head_.load(std::memory_order_acquire).isShutdown());
+  }
+
+  /// Prevents blocking on this semaphore, causing all blocking wait()
+  /// calls to throw ShutdownSemError.  Both currently blocked wait() and
+  /// future calls to wait() for which tryWait() would return false will
+  /// cause an exception.  Calls to wait() for which the matching post()
+  /// has already occurred will proceed normally.
+  void shutdown() {
+    // first set the shutdown bit
+    auto h = head_.load(std::memory_order_acquire);
+    while (!h.isShutdown()) {
+      if (head_.compare_exchange_strong(h, h.withShutdown())) {
+        // success
+        h = h.withShutdown();
+        break;
+      }
+      // compare_exchange_strong rereads h, retry
+    }
+
+    // now wake up any waiters
+    while (h.isNodeIdx()) {
+      auto& node = idxToNode(h.idx());
+      auto repl = h.withPop(node.next);
+      if (head_.compare_exchange_strong(h, repl)) {
+        // successful pop, wake up the waiter and move on.  The next
+        // field is used to convey that this wakeup didn't consume a value
+        node.setShutdownNotice();
+        node.handoff().post();
+        h = repl;
+      }
+    }
+  }
+
+  /// Returns true iff value was decremented
+  bool tryWait() {
+    uint32_t n = 1;
+    auto rv = decrOrPush(n, 0);
+    assert((rv == WaitResult::DECR && n == 0) ||
+           (rv != WaitResult::DECR && n == 1));
+    // SHUTDOWN is okay here, since we don't actually wait
+    return rv == WaitResult::DECR;
+  }
+
+  /// Equivalent to (but may be much more efficient than) n calls to
+  /// tryWait().  Returns the total amount by which the semaphore's value
+  /// was decreased
+  uint32_t tryWait(uint32_t n) {
+    auto const orig = n;
+    while (n > 0) {
+#ifndef NDEBUG
+      auto prev = n;
+#endif
+      auto rv = decrOrPush(n, 0);
+      assert((rv == WaitResult::DECR && n < prev) ||
+             (rv != WaitResult::DECR && n == prev));
+      if (rv != WaitResult::DECR) {
+        break;
+      }
+    }
+    return orig - n;
+  }
+
+  /// Blocks the current thread until there is a matching post or the
+  /// semaphore is shut down.  Throws ShutdownSemError if the semaphore
+  /// has been shut down and this method would otherwise be blocking.
+  /// Note that wait() doesn't throw during shutdown if tryWait() would
+  /// return true
+  void wait() {
+    // early check isn't required for correctness, but is an important
+    // perf win if we can avoid allocating and deallocating a node
+    if (tryWait()) {
+      return;
+    }
+
+    // allocateNode() won't compile unless Handoff has a default
+    // constructor
+    UniquePtr node = allocateNode();
+
+    auto rv = tryWaitOrPush(*node);
+    if (UNLIKELY(rv == WaitResult::SHUTDOWN)) {
+      assert(isShutdown());
+      throw ShutdownSemError("wait() would block but semaphore is shut down");
+    }
+
+    if (rv == WaitResult::PUSH) {
+      node->handoff().wait();
+      if (UNLIKELY(node->isShutdownNotice())) {
+        // this wait() didn't consume a value, it was triggered by shutdown
+        assert(isShutdown());
+        throw ShutdownSemError(
+            "blocking wait() interrupted by semaphore shutdown");
+      }
+
+      // node->handoff().wait() can't return until after the node has
+      // been popped and post()ed, so it is okay for the UniquePtr to
+      // recycle the node now
+    }
+    // else node wasn't pushed, so it is safe to recycle
+  }
+
+  /// Returns a guess at the current value, designed for debugging.
+  /// If there are no concurrent posters or waiters then this will
+  /// be correct
+  uint32_t valueGuess() const {
+    // this is actually linearizable, but we don't promise that because
+    // we may want to add striping in the future to help under heavy
+    // contention
+    auto h = head_.load(std::memory_order_acquire);
+    return h.isNodeIdx() ? 0 : h.value();
+  }
+
+ protected:
+
+  enum class WaitResult {
+    PUSH,
+    DECR,
+    SHUTDOWN,
+  };
+
+  /// The type of a std::unique_ptr that will automatically return a
+  /// LifoSemNode to the appropriate IndexedMemPool
+  typedef std::unique_ptr<LifoSemNode<Handoff, Atom>,
+                          LifoSemNodeRecycler<Handoff, Atom>> UniquePtr;
+
+  /// Returns a node that can be passed to decrOrLink
+  template <typename... Args>
+  UniquePtr allocateNode(Args&&... args) {
+    auto idx = LifoSemRawNode<Atom>::pool.allocIndex();
+    if (idx != 0) {
+      auto& node = idxToNode(idx);
+      node.clearShutdownNotice();
+      try {
+        node.init(std::forward<Args>(args)...);
+      } catch (...) {
+        LifoSemRawNode<Atom>::pool.recycleIndex(idx);
+        throw;
+      }
+      return UniquePtr(&node);
+    } else {
+      return UniquePtr();
+    }
+  }
+
+  /// Returns DECR if the semaphore value was decremented (and waiterNode
+  /// was untouched), PUSH if a reference to the wait node was pushed,
+  /// or SHUTDOWN if decrement was not possible and push wasn't allowed
+  /// because isShutdown().  Ownership of the wait node remains the
+  /// responsibility of the caller, who must not release it until after
+  /// the node's Handoff has been posted.
+  WaitResult tryWaitOrPush(LifoSemNode<Handoff, Atom>& waiterNode) {
+    uint32_t n = 1;
+    return decrOrPush(n, nodeToIdx(waiterNode));
+  }
+
+ private:
+
+  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
+  folly::AtomicStruct<LifoSemHead,Atom> head_;
+
+  char padding_[folly::detail::CacheLocality::kFalseSharingRange -
+      sizeof(LifoSemHead)];
+
+
+  static LifoSemNode<Handoff, Atom>& idxToNode(uint32_t idx) {
+    auto raw = &LifoSemRawNode<Atom>::pool[idx];
+    return *static_cast<LifoSemNode<Handoff, Atom>*>(raw);
+  }
+
+  static uint32_t nodeToIdx(const LifoSemNode<Handoff, Atom>& node) {
+    return LifoSemRawNode<Atom>::pool.locateElem(&node);
+  }
+
+  /// Either increments by n and returns 0, or pops a node and returns it.
+  /// If n + the stripe's value overflows, then the stripe's value
+  /// saturates silently at 2^32-1
+  uint32_t incrOrPop(uint32_t n) {
+    while (true) {
+      assert(n > 0);
+
+      auto head = head_.load(std::memory_order_acquire);
+      if (head.isNodeIdx()) {
+        auto& node = idxToNode(head.idx());
+        if (head_.compare_exchange_strong(head, head.withPop(node.next))) {
+          // successful pop
+          return head.idx();
+        }
+      } else {
+        auto after = head.withValueIncr(n);
+        if (head_.compare_exchange_strong(head, after)) {
+          // successful incr
+          return 0;
+        }
+      }
+      // retry
+    }
+  }
+
+  /// Returns DECR if some amount was decremented, with that amount
+  /// subtracted from n.  If n is 1 and this function returns DECR then n
+  /// must be 0 afterward.  Returns PUSH if no value could be decremented
+  /// and idx was pushed, or if idx was zero and no push was performed but
+  /// a push would have been performed with a valid node.  Returns SHUTDOWN
+  /// if the caller should have blocked but isShutdown().  If idx == 0,
+  /// may return PUSH even after isShutdown() or may return SHUTDOWN
+  WaitResult decrOrPush(uint32_t& n, uint32_t idx) {
+    assert(n > 0);
+
+    while (true) {
+      auto head = head_.load(std::memory_order_acquire);
+
+      if (!head.isNodeIdx() && head.value() > 0) {
+        // decr
+        auto delta = std::min(n, head.value());
+        if (head_.compare_exchange_strong(head, head.withValueDecr(delta))) {
+          n -= delta;
+          return WaitResult::DECR;
+        }
+      } else {
+        // push
+        if (idx == 0) {
+          return WaitResult::PUSH;
+        }
+
+        if (UNLIKELY(head.isShutdown())) {
+          return WaitResult::SHUTDOWN;
+        }
+
+        auto& node = idxToNode(idx);
+        node.next = head.isNodeIdx() ? head.idx() : 0;
+        if (head_.compare_exchange_strong(head, head.withPush(idx))) {
+          // push succeeded
+          return WaitResult::PUSH;
+        }
+      }
+      // retry
+    }
+  }
+};
+
+} // namespace detail
+
+template <template<typename> class Atom, class BatonType>
+struct LifoSemImpl : public detail::LifoSemBase<BatonType, Atom> {
+  explicit LifoSemImpl(uint32_t v = 0)
+    : detail::LifoSemBase<BatonType, Atom>(v) {}
+};
+
+} // namespace folly
+
+#endif
diff --git a/faux-folly/folly/Likely.h b/faux-folly/folly/Likely.h
new file mode 100644
index 0000000..1286736
--- /dev/null
+++ b/faux-folly/folly/Likely.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Compiler hints to indicate the fast path of an "if" branch: whether
+ * the if condition is likely to be true or false.
+ *
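+ * For example (a sketch; the null check is illustrative):
+ *
+ *   if (UNLIKELY(ptr == nullptr)) {
+ *     return;
+ *   }
+ *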
+ * @author Tudor Bosman (tudorb@fb.com)
+ */
+
+#ifndef FOLLY_BASE_LIKELY_H_
+#define FOLLY_BASE_LIKELY_H_
+
+#undef LIKELY
+#undef UNLIKELY
+
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define LIKELY(x)   (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+#else
+#define LIKELY(x)   (x)
+#define UNLIKELY(x) (x)
+#endif
+
+#endif /* FOLLY_BASE_LIKELY_H_ */
diff --git a/faux-folly/folly/Logging.h b/faux-folly/folly/Logging.h
new file mode 100644
index 0000000..d5a3173
--- /dev/null
+++ b/faux-folly/folly/Logging.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_LOGGING_H_
+#define FOLLY_LOGGING_H_
+
+#include <atomic>
+#include <chrono>
+#include <glog/logging.h>
+
+#ifndef FB_LOG_EVERY_MS
+/**
+ * Issues a LOG(severity) no more often than once every
+ * milli_interval milliseconds. Example:
+ *
+ * FB_LOG_EVERY_MS(INFO, 10000) << "At least ten seconds passed"
+ *   " since you last saw this.";
+ *
+ * The implementation uses for statements to introduce variables in
+ * a way that doesn't interfere with surrounding statements.  It is thread
+ * safe.  Non-positive intervals will always log.
+ */
+#define FB_LOG_EVERY_MS(severity, milli_interval)                            \